triton-windows 3.1.0.post17__cp310-cp310-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of triton-windows might be problematic. Click here for more details.
- triton/_C/libtriton.pyd +0 -0
- triton/__init__.py +73 -0
- triton/backends/__init__.py +50 -0
- triton/backends/amd/compiler.py +262 -0
- triton/backends/amd/driver.c +211 -0
- triton/backends/amd/driver.py +497 -0
- triton/backends/amd/include/hip/amd_detail/amd_channel_descriptor.h +358 -0
- triton/backends/amd/include/hip/amd_detail/amd_device_functions.h +1031 -0
- triton/backends/amd/include/hip/amd_detail/amd_hip_atomic.h +1612 -0
- triton/backends/amd/include/hip/amd_detail/amd_hip_bf16.h +1337 -0
- triton/backends/amd/include/hip/amd_detail/amd_hip_bfloat16.h +293 -0
- triton/backends/amd/include/hip/amd_detail/amd_hip_common.h +32 -0
- triton/backends/amd/include/hip/amd_detail/amd_hip_complex.h +174 -0
- triton/backends/amd/include/hip/amd_detail/amd_hip_cooperative_groups.h +829 -0
- triton/backends/amd/include/hip/amd_detail/amd_hip_fp16.h +1809 -0
- triton/backends/amd/include/hip/amd_detail/amd_hip_gl_interop.h +108 -0
- triton/backends/amd/include/hip/amd_detail/amd_hip_math_constants.h +124 -0
- triton/backends/amd/include/hip/amd_detail/amd_hip_runtime.h +405 -0
- triton/backends/amd/include/hip/amd_detail/amd_hip_runtime_pt_api.h +196 -0
- triton/backends/amd/include/hip/amd_detail/amd_hip_unsafe_atomics.h +565 -0
- triton/backends/amd/include/hip/amd_detail/amd_hip_vector_types.h +2226 -0
- triton/backends/amd/include/hip/amd_detail/amd_math_functions.h +104 -0
- triton/backends/amd/include/hip/amd_detail/amd_surface_functions.h +244 -0
- triton/backends/amd/include/hip/amd_detail/amd_warp_functions.h +494 -0
- triton/backends/amd/include/hip/amd_detail/concepts.hpp +30 -0
- triton/backends/amd/include/hip/amd_detail/device_library_decls.h +133 -0
- triton/backends/amd/include/hip/amd_detail/functional_grid_launch.hpp +218 -0
- triton/backends/amd/include/hip/amd_detail/grid_launch.h +67 -0
- triton/backends/amd/include/hip/amd_detail/grid_launch.hpp +50 -0
- triton/backends/amd/include/hip/amd_detail/grid_launch_GGL.hpp +26 -0
- triton/backends/amd/include/hip/amd_detail/helpers.hpp +137 -0
- triton/backends/amd/include/hip/amd_detail/hip_api_trace.hpp +1350 -0
- triton/backends/amd/include/hip/amd_detail/hip_assert.h +101 -0
- triton/backends/amd/include/hip/amd_detail/hip_cooperative_groups_helper.h +242 -0
- triton/backends/amd/include/hip/amd_detail/hip_fp16_gcc.h +254 -0
- triton/backends/amd/include/hip/amd_detail/hip_fp16_math_fwd.h +96 -0
- triton/backends/amd/include/hip/amd_detail/hip_ldg.h +100 -0
- triton/backends/amd/include/hip/amd_detail/hip_prof_str.h +10169 -0
- triton/backends/amd/include/hip/amd_detail/hip_runtime_prof.h +77 -0
- triton/backends/amd/include/hip/amd_detail/host_defines.h +180 -0
- triton/backends/amd/include/hip/amd_detail/hsa_helpers.hpp +102 -0
- triton/backends/amd/include/hip/amd_detail/macro_based_grid_launch.hpp +798 -0
- triton/backends/amd/include/hip/amd_detail/math_fwd.h +698 -0
- triton/backends/amd/include/hip/amd_detail/ockl_image.h +177 -0
- triton/backends/amd/include/hip/amd_detail/program_state.hpp +107 -0
- triton/backends/amd/include/hip/amd_detail/texture_fetch_functions.h +491 -0
- triton/backends/amd/include/hip/amd_detail/texture_indirect_functions.h +478 -0
- triton/backends/amd/include/hip/channel_descriptor.h +39 -0
- triton/backends/amd/include/hip/device_functions.h +38 -0
- triton/backends/amd/include/hip/driver_types.h +468 -0
- triton/backends/amd/include/hip/hip_bf16.h +36 -0
- triton/backends/amd/include/hip/hip_bfloat16.h +44 -0
- triton/backends/amd/include/hip/hip_common.h +100 -0
- triton/backends/amd/include/hip/hip_complex.h +38 -0
- triton/backends/amd/include/hip/hip_cooperative_groups.h +46 -0
- triton/backends/amd/include/hip/hip_deprecated.h +95 -0
- triton/backends/amd/include/hip/hip_ext.h +159 -0
- triton/backends/amd/include/hip/hip_fp16.h +36 -0
- triton/backends/amd/include/hip/hip_gl_interop.h +32 -0
- triton/backends/amd/include/hip/hip_hcc.h +24 -0
- triton/backends/amd/include/hip/hip_math_constants.h +36 -0
- triton/backends/amd/include/hip/hip_profile.h +27 -0
- triton/backends/amd/include/hip/hip_runtime.h +75 -0
- triton/backends/amd/include/hip/hip_runtime_api.h +8919 -0
- triton/backends/amd/include/hip/hip_texture_types.h +29 -0
- triton/backends/amd/include/hip/hip_vector_types.h +41 -0
- triton/backends/amd/include/hip/hip_version.h +17 -0
- triton/backends/amd/include/hip/hiprtc.h +421 -0
- triton/backends/amd/include/hip/library_types.h +78 -0
- triton/backends/amd/include/hip/math_functions.h +42 -0
- triton/backends/amd/include/hip/surface_types.h +63 -0
- triton/backends/amd/include/hip/texture_types.h +194 -0
- triton/backends/amd/include/hsa/Brig.h +1131 -0
- triton/backends/amd/include/hsa/amd_hsa_common.h +91 -0
- triton/backends/amd/include/hsa/amd_hsa_elf.h +435 -0
- triton/backends/amd/include/hsa/amd_hsa_kernel_code.h +269 -0
- triton/backends/amd/include/hsa/amd_hsa_queue.h +109 -0
- triton/backends/amd/include/hsa/amd_hsa_signal.h +80 -0
- triton/backends/amd/include/hsa/hsa.h +5729 -0
- triton/backends/amd/include/hsa/hsa_amd_tool.h +91 -0
- triton/backends/amd/include/hsa/hsa_api_trace.h +566 -0
- triton/backends/amd/include/hsa/hsa_ext_amd.h +3090 -0
- triton/backends/amd/include/hsa/hsa_ext_finalize.h +531 -0
- triton/backends/amd/include/hsa/hsa_ext_image.h +1454 -0
- triton/backends/amd/include/hsa/hsa_ven_amd_aqlprofile.h +488 -0
- triton/backends/amd/include/hsa/hsa_ven_amd_loader.h +667 -0
- triton/backends/amd/include/roctracer/ext/prof_protocol.h +107 -0
- triton/backends/amd/include/roctracer/hip_ostream_ops.h +4435 -0
- triton/backends/amd/include/roctracer/hsa_ostream_ops.h +1467 -0
- triton/backends/amd/include/roctracer/hsa_prof_str.h +3027 -0
- triton/backends/amd/include/roctracer/roctracer.h +779 -0
- triton/backends/amd/include/roctracer/roctracer_ext.h +81 -0
- triton/backends/amd/include/roctracer/roctracer_hcc.h +24 -0
- triton/backends/amd/include/roctracer/roctracer_hip.h +37 -0
- triton/backends/amd/include/roctracer/roctracer_hsa.h +112 -0
- triton/backends/amd/include/roctracer/roctracer_plugin.h +137 -0
- triton/backends/amd/include/roctracer/roctracer_roctx.h +67 -0
- triton/backends/amd/include/roctracer/roctx.h +229 -0
- triton/backends/amd/lib/ockl.bc +0 -0
- triton/backends/amd/lib/ocml.bc +0 -0
- triton/backends/compiler.py +76 -0
- triton/backends/driver.py +34 -0
- triton/backends/nvidia/__init__.py +0 -0
- triton/backends/nvidia/bin/ptxas.exe +0 -0
- triton/backends/nvidia/compiler.py +347 -0
- triton/backends/nvidia/driver.c +451 -0
- triton/backends/nvidia/driver.py +430 -0
- triton/backends/nvidia/include/cuda.h +24359 -0
- triton/backends/nvidia/lib/libdevice.10.bc +0 -0
- triton/backends/nvidia/lib/x64/cuda.lib +0 -0
- triton/compiler/__init__.py +4 -0
- triton/compiler/code_generator.py +1302 -0
- triton/compiler/compiler.py +416 -0
- triton/compiler/errors.py +51 -0
- triton/compiler/make_launcher.py +0 -0
- triton/errors.py +5 -0
- triton/language/__init__.py +284 -0
- triton/language/core.py +2621 -0
- triton/language/extra/__init__.py +4 -0
- triton/language/extra/cuda/__init__.py +8 -0
- triton/language/extra/cuda/libdevice.py +1629 -0
- triton/language/extra/cuda/utils.py +109 -0
- triton/language/extra/hip/__init__.py +3 -0
- triton/language/extra/hip/libdevice.py +468 -0
- triton/language/extra/libdevice.py +1213 -0
- triton/language/math.py +250 -0
- triton/language/random.py +207 -0
- triton/language/semantic.py +1621 -0
- triton/language/standard.py +441 -0
- triton/ops/__init__.py +7 -0
- triton/ops/blocksparse/__init__.py +7 -0
- triton/ops/blocksparse/matmul.py +432 -0
- triton/ops/blocksparse/softmax.py +228 -0
- triton/ops/cross_entropy.py +96 -0
- triton/ops/flash_attention.py +466 -0
- triton/ops/matmul.py +219 -0
- triton/ops/matmul_perf_model.py +171 -0
- triton/runtime/__init__.py +23 -0
- triton/runtime/autotuner.py +361 -0
- triton/runtime/build.py +129 -0
- triton/runtime/cache.py +289 -0
- triton/runtime/driver.py +60 -0
- triton/runtime/errors.py +26 -0
- triton/runtime/interpreter.py +1127 -0
- triton/runtime/jit.py +956 -0
- triton/runtime/tcc/include/_mingw.h +170 -0
- triton/runtime/tcc/include/assert.h +57 -0
- triton/runtime/tcc/include/conio.h +409 -0
- triton/runtime/tcc/include/ctype.h +281 -0
- triton/runtime/tcc/include/dir.h +31 -0
- triton/runtime/tcc/include/direct.h +68 -0
- triton/runtime/tcc/include/dirent.h +135 -0
- triton/runtime/tcc/include/dos.h +55 -0
- triton/runtime/tcc/include/errno.h +75 -0
- triton/runtime/tcc/include/excpt.h +123 -0
- triton/runtime/tcc/include/fcntl.h +52 -0
- triton/runtime/tcc/include/fenv.h +108 -0
- triton/runtime/tcc/include/float.h +57 -0
- triton/runtime/tcc/include/inttypes.h +297 -0
- triton/runtime/tcc/include/io.h +418 -0
- triton/runtime/tcc/include/limits.h +111 -0
- triton/runtime/tcc/include/locale.h +91 -0
- triton/runtime/tcc/include/malloc.h +181 -0
- triton/runtime/tcc/include/math.h +737 -0
- triton/runtime/tcc/include/mem.h +13 -0
- triton/runtime/tcc/include/memory.h +40 -0
- triton/runtime/tcc/include/process.h +176 -0
- triton/runtime/tcc/include/sec_api/conio_s.h +42 -0
- triton/runtime/tcc/include/sec_api/crtdbg_s.h +19 -0
- triton/runtime/tcc/include/sec_api/io_s.h +33 -0
- triton/runtime/tcc/include/sec_api/mbstring_s.h +52 -0
- triton/runtime/tcc/include/sec_api/search_s.h +25 -0
- triton/runtime/tcc/include/sec_api/stdio_s.h +145 -0
- triton/runtime/tcc/include/sec_api/stdlib_s.h +67 -0
- triton/runtime/tcc/include/sec_api/stralign_s.h +30 -0
- triton/runtime/tcc/include/sec_api/string_s.h +41 -0
- triton/runtime/tcc/include/sec_api/sys/timeb_s.h +34 -0
- triton/runtime/tcc/include/sec_api/tchar_s.h +266 -0
- triton/runtime/tcc/include/sec_api/time_s.h +61 -0
- triton/runtime/tcc/include/sec_api/wchar_s.h +128 -0
- triton/runtime/tcc/include/setjmp.h +160 -0
- triton/runtime/tcc/include/share.h +28 -0
- triton/runtime/tcc/include/signal.h +63 -0
- triton/runtime/tcc/include/stdarg.h +79 -0
- triton/runtime/tcc/include/stdbool.h +11 -0
- triton/runtime/tcc/include/stddef.h +54 -0
- triton/runtime/tcc/include/stdint.h +212 -0
- triton/runtime/tcc/include/stdio.h +429 -0
- triton/runtime/tcc/include/stdlib.h +580 -0
- triton/runtime/tcc/include/string.h +164 -0
- triton/runtime/tcc/include/sys/fcntl.h +13 -0
- triton/runtime/tcc/include/sys/file.h +14 -0
- triton/runtime/tcc/include/sys/locking.h +30 -0
- triton/runtime/tcc/include/sys/stat.h +290 -0
- triton/runtime/tcc/include/sys/time.h +69 -0
- triton/runtime/tcc/include/sys/timeb.h +133 -0
- triton/runtime/tcc/include/sys/types.h +118 -0
- triton/runtime/tcc/include/sys/unistd.h +14 -0
- triton/runtime/tcc/include/sys/utime.h +146 -0
- triton/runtime/tcc/include/tcc/tcc_libm.h +201 -0
- triton/runtime/tcc/include/tcclib.h +80 -0
- triton/runtime/tcc/include/tchar.h +1102 -0
- triton/runtime/tcc/include/time.h +287 -0
- triton/runtime/tcc/include/vadefs.h +11 -0
- triton/runtime/tcc/include/values.h +4 -0
- triton/runtime/tcc/include/varargs.h +12 -0
- triton/runtime/tcc/include/wchar.h +873 -0
- triton/runtime/tcc/include/wctype.h +172 -0
- triton/runtime/tcc/include/winapi/basetsd.h +149 -0
- triton/runtime/tcc/include/winapi/basetyps.h +85 -0
- triton/runtime/tcc/include/winapi/guiddef.h +156 -0
- triton/runtime/tcc/include/winapi/poppack.h +8 -0
- triton/runtime/tcc/include/winapi/pshpack1.h +8 -0
- triton/runtime/tcc/include/winapi/pshpack2.h +8 -0
- triton/runtime/tcc/include/winapi/pshpack4.h +8 -0
- triton/runtime/tcc/include/winapi/pshpack8.h +8 -0
- triton/runtime/tcc/include/winapi/winbase.h +2951 -0
- triton/runtime/tcc/include/winapi/wincon.h +301 -0
- triton/runtime/tcc/include/winapi/windef.h +293 -0
- triton/runtime/tcc/include/winapi/windows.h +127 -0
- triton/runtime/tcc/include/winapi/winerror.h +3166 -0
- triton/runtime/tcc/include/winapi/wingdi.h +4080 -0
- triton/runtime/tcc/include/winapi/winnt.h +5835 -0
- triton/runtime/tcc/include/winapi/winreg.h +272 -0
- triton/runtime/tcc/include/winapi/winuser.h +5651 -0
- triton/runtime/tcc/include/winapi/winver.h +160 -0
- triton/runtime/tcc/lib/cuda.def +697 -0
- triton/runtime/tcc/lib/gdi32.def +337 -0
- triton/runtime/tcc/lib/kernel32.def +770 -0
- triton/runtime/tcc/lib/libtcc1-64.a +0 -0
- triton/runtime/tcc/lib/msvcrt.def +1399 -0
- triton/runtime/tcc/lib/python3.def +810 -0
- triton/runtime/tcc/lib/user32.def +658 -0
- triton/runtime/tcc/libtcc.dll +0 -0
- triton/runtime/tcc/tcc.exe +0 -0
- triton/testing.py +496 -0
- triton/tools/__init__.py +0 -0
- triton/tools/build_extern.py +365 -0
- triton/tools/compile.c +67 -0
- triton/tools/compile.h +14 -0
- triton/tools/compile.py +145 -0
- triton/tools/disasm.py +142 -0
- triton/tools/link.py +322 -0
- triton/windows_utils.py +373 -0
- triton_windows-3.1.0.post17.dist-info/METADATA +41 -0
- triton_windows-3.1.0.post17.dist-info/RECORD +248 -0
- triton_windows-3.1.0.post17.dist-info/WHEEL +5 -0
- triton_windows-3.1.0.post17.dist-info/top_level.txt +14 -0
|
@@ -0,0 +1,451 @@
|
|
|
1
|
+
#include "cuda.h"
|
|
2
|
+
|
|
3
|
+
#ifndef _WIN32
|
|
4
|
+
#include <dlfcn.h>
|
|
5
|
+
#else
|
|
6
|
+
#define WIN32_LEAN_AND_MEAN
|
|
7
|
+
#include <windows.h>
|
|
8
|
+
#endif
|
|
9
|
+
|
|
10
|
+
#include <stdbool.h>
|
|
11
|
+
#define PY_SSIZE_T_CLEAN
|
|
12
|
+
#include <Python.h>
|
|
13
|
+
// #include <stdatomic.h>
|
|
14
|
+
|
|
15
|
+
// Raises a Python exception and returns false if code is not CUDA_SUCCESS.
|
|
16
|
+
static bool gpuAssert(CUresult code, const char *file, int line) {
|
|
17
|
+
if (code == CUDA_SUCCESS)
|
|
18
|
+
return true;
|
|
19
|
+
|
|
20
|
+
const char *prefix = "Triton Error [CUDA]: ";
|
|
21
|
+
const char *str;
|
|
22
|
+
cuGetErrorString(code, &str);
|
|
23
|
+
char err[1024] = {0};
|
|
24
|
+
strcat(err, prefix);
|
|
25
|
+
strcat(err, str);
|
|
26
|
+
PyGILState_STATE gil_state;
|
|
27
|
+
gil_state = PyGILState_Ensure();
|
|
28
|
+
PyErr_SetString(PyExc_RuntimeError, err);
|
|
29
|
+
PyGILState_Release(gil_state);
|
|
30
|
+
return false;
|
|
31
|
+
}
|
|
32
|
+
|
|
33
|
+
// To be used only *outside* a Py_{BEGIN,END}_ALLOW_THREADS block.
// Evaluates `ans`; if it is not CUDA_SUCCESS, gpuAssert sets a Python
// RuntimeError and the *enclosing function* returns NULL.
#define CUDA_CHECK_AND_RETURN_NULL(ans)                                        \
  do {                                                                         \
    if (!gpuAssert((ans), __FILE__, __LINE__))                                 \
      return NULL;                                                             \
  } while (0)
|
|
39
|
+
|
|
40
|
+
// To be used inside a Py_{BEGIN,END}_ALLOW_THREADS block.
// Same contract as CUDA_CHECK_AND_RETURN_NULL, but reacquires the GIL via
// PyEval_RestoreThread before returning, since the early return skips the
// matching Py_END_ALLOW_THREADS. Relies on the `_save` variable that
// Py_BEGIN_ALLOW_THREADS declares, so it must only appear between the two
// macros.
#define CUDA_CHECK_AND_RETURN_NULL_ALLOW_THREADS(ans)                          \
  do {                                                                         \
    if (!gpuAssert((ans), __FILE__, __LINE__)) {                               \
      PyEval_RestoreThread(_save);                                             \
      return NULL;                                                             \
    }                                                                          \
  } while (0)
|
|
48
|
+
|
|
49
|
+
// Used to check if functions exist in old CUDA driver versions.
// Lazily resolves `funcPointer` through `initializerFunction` the first time
// it is used (the pointer is expected to be a function-local static, so the
// lookup happens once per process). If resolution fails, the initializer has
// already set a Python error and the *enclosing function* returns NULL.
#define INITIALIZE_FUNCTION_POINTER_IF_NULL(funcPointer, initializerFunction)  \
  do {                                                                         \
    if ((funcPointer) == NULL) {                                               \
      (funcPointer) = (initializerFunction)();                                 \
      if ((funcPointer) == NULL) {                                             \
        return NULL;                                                           \
      }                                                                        \
    }                                                                          \
  } while (0)
|
|
59
|
+
|
|
60
|
+
// Python: get_device_properties(device_id: int) -> dict
// Queries a handful of CUDA device attributes and returns them as a dict.
// Raises RuntimeError (via gpuAssert) on any driver failure.
static PyObject *getDeviceProperties(PyObject *self, PyObject *args) {
  int device_id;
  if (!PyArg_ParseTuple(args, "i", &device_id))
    return NULL;

  // Get device handle. The original ignored this call's result, so an invalid
  // device_id silently produced garbage attributes; check it like every other
  // driver call in this function.
  CUdevice device;
  CUDA_CHECK_AND_RETURN_NULL(cuDeviceGet(&device, device_id));

  int max_shared_mem;
  int max_num_regs;
  int multiprocessor_count;
  int warp_size;
  int sm_clock_rate;
  int mem_clock_rate;
  int mem_bus_width;
  CUDA_CHECK_AND_RETURN_NULL(cuDeviceGetAttribute(
      &max_shared_mem, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN,
      device));
  CUDA_CHECK_AND_RETURN_NULL(cuDeviceGetAttribute(
      &max_num_regs, CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK, device));
  CUDA_CHECK_AND_RETURN_NULL(cuDeviceGetAttribute(
      &multiprocessor_count, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, device));
  CUDA_CHECK_AND_RETURN_NULL(
      cuDeviceGetAttribute(&warp_size, CU_DEVICE_ATTRIBUTE_WARP_SIZE, device));
  CUDA_CHECK_AND_RETURN_NULL(cuDeviceGetAttribute(
      &sm_clock_rate, CU_DEVICE_ATTRIBUTE_CLOCK_RATE, device));
  CUDA_CHECK_AND_RETURN_NULL(cuDeviceGetAttribute(
      &mem_clock_rate, CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE, device));
  CUDA_CHECK_AND_RETURN_NULL(cuDeviceGetAttribute(
      &mem_bus_width, CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH, device));

  // NOTE: "warpSize" is camelCase while the other keys are snake_case; kept
  // as-is because Python callers look the keys up by name.
  return Py_BuildValue("{s:i, s:i, s:i, s:i, s:i, s:i, s:i}", "max_shared_mem",
                       max_shared_mem, "max_num_regs", max_num_regs,
                       "multiprocessor_count", multiprocessor_count, "warpSize",
                       warp_size, "sm_clock_rate", sm_clock_rate,
                       "mem_clock_rate", mem_clock_rate, "mem_bus_width",
                       mem_bus_width);
}
|
|
99
|
+
|
|
100
|
+
// Python: load_binary(name: str, data: bytes, shared: int, device: int)
//   -> (module_handle, function_handle, n_regs, n_spills)
// Loads a compiled kernel image into the CUDA driver, looks up the kernel
// function by name, reports its register/spill usage, and (when `shared`
// exceeds the 48 KiB default) opts the function into the device's larger
// dynamic shared-memory carve-out.
static PyObject *loadBinary(PyObject *self, PyObject *args) {
  const char *name;
  const char *data;
  Py_ssize_t data_size;
  int shared;
  int device;
  if (!PyArg_ParseTuple(args, "ss#ii", &name, &data, &data_size, &shared,
                        &device)) {
    return NULL;
  }
  CUfunction fun;
  CUmodule mod;
  int32_t n_regs = 0;
  int32_t n_spills = 0;
  // create driver handles
  CUcontext pctx = 0;

  // All driver calls below drop the GIL; the _ALLOW_THREADS check macro
  // reacquires it on the error path before returning.
  Py_BEGIN_ALLOW_THREADS;
  CUDA_CHECK_AND_RETURN_NULL_ALLOW_THREADS(cuCtxGetCurrent(&pctx));
  if (!pctx) {
    // No current context yet: bind this thread to the device's primary
    // context so the module load below has somewhere to live.
    CUDA_CHECK_AND_RETURN_NULL_ALLOW_THREADS(
        cuDevicePrimaryCtxRetain(&pctx, device));
    CUDA_CHECK_AND_RETURN_NULL_ALLOW_THREADS(cuCtxSetCurrent(pctx));
  }

  CUDA_CHECK_AND_RETURN_NULL_ALLOW_THREADS(cuModuleLoadData(&mod, data));
  CUDA_CHECK_AND_RETURN_NULL_ALLOW_THREADS(
      cuModuleGetFunction(&fun, mod, name));
  // get allocated registers and spilled registers from the function
  CUDA_CHECK_AND_RETURN_NULL_ALLOW_THREADS(
      cuFuncGetAttribute(&n_regs, CU_FUNC_ATTRIBUTE_NUM_REGS, fun));
  CUDA_CHECK_AND_RETURN_NULL_ALLOW_THREADS(
      cuFuncGetAttribute(&n_spills, CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES, fun));
  // LOCAL_SIZE_BYTES is in bytes; report 4-byte (register-sized) units.
  n_spills /= 4;
  // set dynamic shared memory if necessary
  int shared_optin;
  CUDA_CHECK_AND_RETURN_NULL_ALLOW_THREADS(cuDeviceGetAttribute(
      &shared_optin, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN,
      device));
  // 49152 == 48 KiB, the default per-block shared-memory limit; beyond that
  // the function must explicitly opt in to the larger carve-out.
  if (shared > 49152 && shared_optin > 49152) {
    CUDA_CHECK_AND_RETURN_NULL_ALLOW_THREADS(
        cuFuncSetCacheConfig(fun, CU_FUNC_CACHE_PREFER_SHARED));
    int shared_total, shared_static;
    CUDA_CHECK_AND_RETURN_NULL_ALLOW_THREADS(cuDeviceGetAttribute(
        &shared_total, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR,
        device));
    CUDA_CHECK_AND_RETURN_NULL_ALLOW_THREADS(cuFuncGetAttribute(
        &shared_static, CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES, fun));
    // Grant all opt-in shared memory not already consumed by static usage.
    CUDA_CHECK_AND_RETURN_NULL_ALLOW_THREADS(
        cuFuncSetAttribute(fun, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES,
                           shared_optin - shared_static));
  }
  Py_END_ALLOW_THREADS;

  if (PyErr_Occurred()) {
    return NULL;
  }
  // Handles are returned to Python as raw 64-bit integers ("K").
  return Py_BuildValue("(KKii)", (uint64_t)mod, (uint64_t)fun, n_regs,
                       n_spills);
}
|
|
160
|
+
|
|
161
|
+
// Function-pointer types for driver entry points that may be absent from old
// CUDA drivers; they are resolved at runtime (dlsym / GetProcAddress) via the
// defineGetFunctionHandle loaders below instead of being linked directly.
typedef CUresult (*cuOccupancyMaxActiveClusters_t)(
    int *numClusters, CUfunction func, const CUlaunchConfig *config);

typedef CUresult (*cuTensorMapEncodeTiled_t)(
    CUtensorMap *tensorMap, CUtensorMapDataType tensorDataType,
    cuuint32_t tensorRank, void *globalAddress, const cuuint64_t *globalDim,
    const cuuint64_t *globalStrides, const cuuint32_t *boxDim,
    const cuuint32_t *elementStrides, CUtensorMapInterleave interleave,
    CUtensorMapSwizzle swizzle, CUtensorMapL2promotion l2Promotion,
    CUtensorMapFloatOOBfill oobFill);
|
171
|
+
|
|
172
|
+
// defineGetFunctionHandle(name, symbolName) generates
//   static symbolName##_t name(void)
// which resolves `symbolName` from the CUDA driver library at runtime and
// returns it, or returns NULL with a Python RuntimeError set on failure.
#ifndef _WIN32
#define defineGetFunctionHandle(name, symbolName)                              \
  static symbolName##_t name() {                                               \
    /* Open the shared library */                                              \
    void *libHandle = dlopen("libcuda.so.1", RTLD_LAZY);                       \
    if (!libHandle) {                                                          \
      PyErr_SetString(PyExc_RuntimeError, "Failed to open libcuda.so.1");      \
      return NULL;                                                             \
    }                                                                          \
    /* Clear any existing error */                                             \
    dlerror();                                                                 \
    symbolName##_t funcHandle = (symbolName##_t)dlsym(libHandle, #symbolName); \
    /* Check for errors */                                                     \
    const char *err = dlerror();                                               \
    if (err) {                                                                 \
      PyErr_SetString(PyExc_RuntimeError,                                      \
                      "Failed to retrieve " #symbolName " from libcuda.so.1"); \
      dlclose(libHandle);                                                      \
      return NULL;                                                             \
    }                                                                          \
    return funcHandle;                                                         \
  }
#else
#define defineGetFunctionHandle(name, symbolName)                              \
  static symbolName##_t name() {                                               \
    /* Open the shared library */                                              \
    HMODULE handle = LoadLibraryA("nvcuda.dll");                               \
    if (!handle) {                                                             \
      PyErr_SetString(PyExc_RuntimeError, "Failed to open nvcuda.dll");        \
      return NULL;                                                             \
    }                                                                          \
    symbolName##_t funcHandle =                                                \
        (symbolName##_t)GetProcAddress(handle, #symbolName);                   \
    /* Test the returned pointer, not GetLastError(): the thread's */          \
    /* last-error value can hold a stale nonzero code from an earlier, */      \
    /* unrelated API call even when GetProcAddress succeeded, so the */        \
    /* original check could fail spuriously (and a NULL result with a */       \
    /* zero last-error would slip through). */                                 \
    if (!funcHandle) {                                                         \
      PyErr_SetString(PyExc_RuntimeError,                                      \
                      "Failed to retrieve " #symbolName " from nvcuda.dll");   \
      FreeLibrary(handle);                                                     \
      return NULL;                                                             \
    }                                                                          \
    return funcHandle;                                                         \
  }
#endif
|
|
215
|
+
|
|
216
|
+
// Instantiate runtime loaders for the two optional driver symbols used below.
defineGetFunctionHandle(getCuOccupancyMaxActiveClustersHandle,
                        cuOccupancyMaxActiveClusters);

defineGetFunctionHandle(getCuTensorMapEncodeTiledHandle,
                        cuTensorMapEncodeTiled);
|
|
221
|
+
|
|
222
|
+
// Python: cuOccupancyMaxActiveClusters(func: int, shared: int,
//   clusterDimX: int, clusterDimY: int, clusterDimZ: int) -> int
// Returns the maximum number of clusters that can be co-resident, assuming
// one block per SM (blockDim fixed at 128x1x1 by convention with codegen).
static PyObject *occupancyMaxActiveClusters(PyObject *self, PyObject *args) {
  int clusterDimX = -1, clusterDimY = -1, clusterDimZ = -1,
      maxActiveClusters = -1;
  int shared = 0;
  // Parse the handle into the integer type "K" actually writes; the original
  // pointed "K" at a CUfunction*, storing an unsigned long long through an
  // incompatible pointer type.
  unsigned long long funcAddr = 0;

  if (!PyArg_ParseTuple(args, "Kiiii", &funcAddr, &shared, &clusterDimX,
                        &clusterDimY, &clusterDimZ)) {
    return NULL;
  }
  CUfunction func = (CUfunction)(uintptr_t)funcAddr;

  // Let each SM have one block
  int maxActiveBlocks = 1;
  Py_BEGIN_ALLOW_THREADS;
  CUDA_CHECK_AND_RETURN_NULL_ALLOW_THREADS(cuFuncSetAttribute(
      func, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, shared));
  Py_END_ALLOW_THREADS;

  // Zero-initialize both structs so fields added by newer driver headers are
  // not handed to the driver as stack garbage.
  CUlaunchAttribute launchAttr[1] = {{0}};
  launchAttr[0].id = CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION;
  launchAttr[0].value.clusterDim.x = clusterDimX;
  launchAttr[0].value.clusterDim.y = clusterDimY;
  launchAttr[0].value.clusterDim.z = clusterDimZ;
  CUlaunchConfig config = {0};
  config.gridDimX = clusterDimX;
  config.gridDimY = maxActiveBlocks * clusterDimY;
  config.gridDimZ = clusterDimZ;
  config.blockDimX = 128;
  config.blockDimY = 1;
  config.blockDimZ = 1;
  config.sharedMemBytes = shared;
  config.hStream = 0;
  config.numAttrs = 1;
  config.attrs = launchAttr;

  // Resolved lazily because the symbol is missing from old drivers.
  static cuOccupancyMaxActiveClusters_t cuOccupancyMaxActiveClusters = NULL;
  INITIALIZE_FUNCTION_POINTER_IF_NULL(cuOccupancyMaxActiveClusters,
                                      getCuOccupancyMaxActiveClustersHandle);

  Py_BEGIN_ALLOW_THREADS;
  CUDA_CHECK_AND_RETURN_NULL_ALLOW_THREADS(cuFuncSetAttribute(
      func, CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED, 1));
  CUDA_CHECK_AND_RETURN_NULL_ALLOW_THREADS(
      cuOccupancyMaxActiveClusters(&maxActiveClusters, func, &config));
  Py_END_ALLOW_THREADS;
  return PyLong_FromLong(maxActiveClusters);
}
|
|
269
|
+
|
|
270
|
+
// Python: set_printf_fifo_size(size: int) -> None
// Sets CU_LIMIT_PRINTF_FIFO_SIZE for the current (or primary) context.
// Raises ValueError for negative sizes and RuntimeError on driver failure.
static PyObject *setPrintfFifoSize(PyObject *self, PyObject *args) {
  long size;
  if (!PyArg_ParseTuple(args, "l", &size)) {
    return NULL;
  }
  if (size < 0) {
    PyErr_SetString(PyExc_ValueError, "fifo size must be non-negative");
    return NULL;
  }

  Py_BEGIN_ALLOW_THREADS;

  // Ensure we have an active context.
  CUcontext ctx = NULL;
  CUDA_CHECK_AND_RETURN_NULL_ALLOW_THREADS(cuCtxGetCurrent(&ctx));
  if (!ctx) {
    CUDA_CHECK_AND_RETURN_NULL_ALLOW_THREADS(
        cuDevicePrimaryCtxRetain(&ctx, /*device=*/0));
    CUDA_CHECK_AND_RETURN_NULL_ALLOW_THREADS(cuCtxSetCurrent(ctx));
  }

  // We can't set the fifo size after running a kernel that calls printf. This
  // is true even if the set() call is a nop and the new size is the same as the
  // old size.
  //
  // This is unfriendly, so check if the old size matches the new size, and skip
  // the set() call if so.
  size_t oldSize = 0;
  CUDA_CHECK_AND_RETURN_NULL_ALLOW_THREADS(
      cuCtxGetLimit(&oldSize, CU_LIMIT_PRINTF_FIFO_SIZE));
  // Cast avoids a signed/unsigned comparison; `size` is known non-negative.
  if (oldSize != (size_t)size) {
    CUDA_CHECK_AND_RETURN_NULL_ALLOW_THREADS(
        cuCtxSetLimit(CU_LIMIT_PRINTF_FIFO_SIZE, size));
  }

  Py_END_ALLOW_THREADS;
  // The original returned Py_None without incrementing its refcount, slowly
  // corrupting the None singleton's refcount; Py_RETURN_NONE does it right.
  Py_RETURN_NONE;
}
|
|
308
|
+
|
|
309
|
+
// Simple helper to experiment creating TMA descriptors on the host.
|
|
310
|
+
// This is a useful to test TMA operations independently.
|
|
311
|
+
static PyObject *fill1DTMADescriptor(PyObject *self, PyObject *args) {
|
|
312
|
+
unsigned long long global_address;
|
|
313
|
+
uint64_t dim;
|
|
314
|
+
uint32_t tensorDim;
|
|
315
|
+
int elementSize;
|
|
316
|
+
Py_buffer desc_buffer;
|
|
317
|
+
if (!PyArg_ParseTuple(args, "KKiiy*", &global_address, &dim, &tensorDim,
|
|
318
|
+
&elementSize, &desc_buffer)) {
|
|
319
|
+
return NULL;
|
|
320
|
+
}
|
|
321
|
+
char *desc = (char *)desc_buffer.buf;
|
|
322
|
+
uint64_t dims[1] = {dim};
|
|
323
|
+
uint64_t globalStrides[1] = {dim * elementSize};
|
|
324
|
+
uint32_t boxDim[1] = {tensorDim};
|
|
325
|
+
uint32_t elementStrides[1] = {1};
|
|
326
|
+
CUtensorMapDataType type;
|
|
327
|
+
switch (elementSize) {
|
|
328
|
+
case 1:
|
|
329
|
+
type = CU_TENSOR_MAP_DATA_TYPE_UINT8;
|
|
330
|
+
break;
|
|
331
|
+
case 2:
|
|
332
|
+
type = CU_TENSOR_MAP_DATA_TYPE_UINT16;
|
|
333
|
+
break;
|
|
334
|
+
case 4:
|
|
335
|
+
type = CU_TENSOR_MAP_DATA_TYPE_UINT32;
|
|
336
|
+
break;
|
|
337
|
+
default:
|
|
338
|
+
PyErr_SetString(PyExc_ValueError, "elementSize must be 1, 2, or 4");
|
|
339
|
+
}
|
|
340
|
+
assert((elementSize * tensorDim) >= 32 && "block size too small.");
|
|
341
|
+
int rank = 1;
|
|
342
|
+
static cuTensorMapEncodeTiled_t cuTensorMapEncodeTiled = NULL;
|
|
343
|
+
INITIALIZE_FUNCTION_POINTER_IF_NULL(cuTensorMapEncodeTiled,
|
|
344
|
+
getCuTensorMapEncodeTiledHandle);
|
|
345
|
+
CUresult result = cuTensorMapEncodeTiled(
|
|
346
|
+
(CUtensorMap *)desc, type, rank, (void *)global_address, dims,
|
|
347
|
+
globalStrides, boxDim, elementStrides, CU_TENSOR_MAP_INTERLEAVE_NONE,
|
|
348
|
+
CU_TENSOR_MAP_SWIZZLE_NONE, CU_TENSOR_MAP_L2_PROMOTION_NONE,
|
|
349
|
+
CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE);
|
|
350
|
+
assert(result == CUDA_SUCCESS);
|
|
351
|
+
return Py_None;
|
|
352
|
+
}
|
|
353
|
+
|
|
354
|
+
// Simple helper to experiment creating TMA descriptors on the host.
|
|
355
|
+
// This is a useful to test TMA operations independently.
|
|
356
|
+
static PyObject *fill2DTMADescriptor(PyObject *self, PyObject *args) {
|
|
357
|
+
unsigned long long global_address;
|
|
358
|
+
uint64_t dims[2];
|
|
359
|
+
uint32_t tensorDims[2];
|
|
360
|
+
int elementSize;
|
|
361
|
+
Py_buffer desc_buffer;
|
|
362
|
+
if (!PyArg_ParseTuple(args, "KKKiiiy*", &global_address, &dims[1], &dims[0],
|
|
363
|
+
&tensorDims[1], &tensorDims[0], &elementSize,
|
|
364
|
+
&desc_buffer)) {
|
|
365
|
+
return NULL;
|
|
366
|
+
}
|
|
367
|
+
char *desc = (char *)desc_buffer.buf;
|
|
368
|
+
uint64_t globalStrides[2] = {dims[0] * elementSize,
|
|
369
|
+
dims[0] * dims[1] * elementSize};
|
|
370
|
+
uint32_t elementStrides[2] = {1, 1};
|
|
371
|
+
CUtensorMapDataType type;
|
|
372
|
+
switch (elementSize) {
|
|
373
|
+
case 1:
|
|
374
|
+
type = CU_TENSOR_MAP_DATA_TYPE_UINT8;
|
|
375
|
+
break;
|
|
376
|
+
case 2:
|
|
377
|
+
type = CU_TENSOR_MAP_DATA_TYPE_UINT16;
|
|
378
|
+
break;
|
|
379
|
+
case 4:
|
|
380
|
+
type = CU_TENSOR_MAP_DATA_TYPE_UINT32;
|
|
381
|
+
break;
|
|
382
|
+
default:
|
|
383
|
+
PyErr_SetString(PyExc_ValueError, "elementSize must be 1, 2, or 4");
|
|
384
|
+
}
|
|
385
|
+
int rank = 2;
|
|
386
|
+
// Swizzling should be picked in codegen but since we need to set it on the
|
|
387
|
+
// descriptor we rely on a convention between this function and codegen.
|
|
388
|
+
CUtensorMapSwizzle swizzle = CU_TENSOR_MAP_SWIZZLE_128B;
|
|
389
|
+
uint32_t contigDimSizeInByte = elementSize * tensorDims[0];
|
|
390
|
+
if (contigDimSizeInByte >= 128) {
|
|
391
|
+
swizzle = CU_TENSOR_MAP_SWIZZLE_128B;
|
|
392
|
+
} else if (contigDimSizeInByte >= 64) {
|
|
393
|
+
swizzle = CU_TENSOR_MAP_SWIZZLE_64B;
|
|
394
|
+
} else if (contigDimSizeInByte >= 32) {
|
|
395
|
+
swizzle = CU_TENSOR_MAP_SWIZZLE_32B;
|
|
396
|
+
} else {
|
|
397
|
+
assert(false && "block size too small.");
|
|
398
|
+
}
|
|
399
|
+
// The bounding box inner dimension must be less than or equal to the swizzle
|
|
400
|
+
// size.
|
|
401
|
+
// https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__TENSOR__MEMORY.html#group__CUDA__TENSOR__MEMORY_1ga7c7d2aaac9e49294304e755e6f341d7
|
|
402
|
+
// We clamp the block size and the codegen will emit multiple copy operations.
|
|
403
|
+
if (contigDimSizeInByte > 128) {
|
|
404
|
+
tensorDims[0] = 128 / elementSize;
|
|
405
|
+
}
|
|
406
|
+
static cuTensorMapEncodeTiled_t cuTensorMapEncodeTiled = NULL;
|
|
407
|
+
INITIALIZE_FUNCTION_POINTER_IF_NULL(cuTensorMapEncodeTiled,
|
|
408
|
+
getCuTensorMapEncodeTiledHandle);
|
|
409
|
+
CUresult result = cuTensorMapEncodeTiled(
|
|
410
|
+
(CUtensorMap *)desc, type, rank, (void *)global_address, dims,
|
|
411
|
+
globalStrides, tensorDims, elementStrides, CU_TENSOR_MAP_INTERLEAVE_NONE,
|
|
412
|
+
swizzle, CU_TENSOR_MAP_L2_PROMOTION_L2_128B,
|
|
413
|
+
CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE);
|
|
414
|
+
assert(result == CUDA_SUCCESS);
|
|
415
|
+
return Py_None;
|
|
416
|
+
}
|
|
417
|
+
|
|
418
|
+
// Method table for the `cuda_utils` extension module; referenced by ModuleDef
// below. All entries take positional args only (METH_VARARGS).
static PyMethodDef ModuleMethods[] = {
    {"load_binary", loadBinary, METH_VARARGS,
     "Load provided cubin into CUDA driver"},
    {"get_device_properties", getDeviceProperties, METH_VARARGS,
     "Get the properties for a given device"},
    {"cuOccupancyMaxActiveClusters", occupancyMaxActiveClusters, METH_VARARGS,
     "Python interface for cuOccupancyMaxActiveClusters function"},
    {"set_printf_fifo_size", setPrintfFifoSize, METH_VARARGS,
     "Python interface for cuCtxSetLimit(CU_LIMIT_PRINTF_FIFO_SIZE, x), which "
     "controls how many bytes can be streamed from kernels before data starts "
     "being dropped. This inherits all the limitations of this call; in "
     "particular it's an error to change this value after launching any kernel "
     "that calls printf()."},
    {"fill_1d_tma_descriptor", fill1DTMADescriptor, METH_VARARGS, "doc"},
    {"fill_2d_tma_descriptor", fill2DTMADescriptor, METH_VARARGS, "doc"},

    {NULL, NULL, 0, NULL} // sentinel
};
|
|
436
|
+
|
|
437
|
+
// Definition of the `cuda_utils` extension module. Per-module state size -1
// means the module keeps state in globals and does not support
// sub-interpreters.
static struct PyModuleDef ModuleDef = {PyModuleDef_HEAD_INIT, "cuda_utils",
                                       NULL, // documentation
                                       -1, // size
                                       ModuleMethods};
|
|
441
|
+
|
|
442
|
+
PyMODINIT_FUNC PyInit_cuda_utils(void) {
|
|
443
|
+
PyObject *m = PyModule_Create(&ModuleDef);
|
|
444
|
+
if (m == NULL) {
|
|
445
|
+
return NULL;
|
|
446
|
+
}
|
|
447
|
+
|
|
448
|
+
PyModule_AddFunctions(m, ModuleMethods);
|
|
449
|
+
|
|
450
|
+
return m;
|
|
451
|
+
}
|