triton-windows 3.5.1.post21__cp313-cp313-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the package versions as they appear in their respective public registries.
- triton/_C/libtriton.pyd +0 -0
- triton/__init__.py +82 -0
- triton/_filecheck.py +97 -0
- triton/_internal_testing.py +255 -0
- triton/_utils.py +126 -0
- triton/backends/__init__.py +47 -0
- triton/backends/amd/__init__.py +0 -0
- triton/backends/amd/compiler.py +461 -0
- triton/backends/amd/driver.c +283 -0
- triton/backends/amd/driver.py +724 -0
- triton/backends/amd/lib/asanrtl.bc +0 -0
- triton/backends/amd/lib/ockl.bc +0 -0
- triton/backends/amd/lib/ocml.bc +0 -0
- triton/backends/compiler.py +90 -0
- triton/backends/driver.py +66 -0
- triton/backends/nvidia/__init__.py +0 -0
- triton/backends/nvidia/bin/ptxas.exe +0 -0
- triton/backends/nvidia/compiler.py +533 -0
- triton/backends/nvidia/driver.c +517 -0
- triton/backends/nvidia/driver.py +799 -0
- triton/backends/nvidia/include/cuda.h +26280 -0
- triton/backends/nvidia/lib/libdevice.10.bc +0 -0
- triton/backends/nvidia/lib/x64/cuda.lib +0 -0
- triton/compiler/__init__.py +7 -0
- triton/compiler/code_generator.py +1614 -0
- triton/compiler/compiler.py +509 -0
- triton/compiler/errors.py +51 -0
- triton/compiler/make_launcher.py +0 -0
- triton/errors.py +5 -0
- triton/experimental/__init__.py +0 -0
- triton/experimental/gluon/__init__.py +5 -0
- triton/experimental/gluon/_compiler.py +0 -0
- triton/experimental/gluon/_runtime.py +102 -0
- triton/experimental/gluon/language/__init__.py +119 -0
- triton/experimental/gluon/language/_core.py +490 -0
- triton/experimental/gluon/language/_layouts.py +583 -0
- triton/experimental/gluon/language/_math.py +20 -0
- triton/experimental/gluon/language/_semantic.py +380 -0
- triton/experimental/gluon/language/_standard.py +80 -0
- triton/experimental/gluon/language/amd/__init__.py +4 -0
- triton/experimental/gluon/language/amd/_layouts.py +96 -0
- triton/experimental/gluon/language/amd/cdna3/__init__.py +100 -0
- triton/experimental/gluon/language/amd/cdna4/__init__.py +48 -0
- triton/experimental/gluon/language/amd/cdna4/async_copy.py +151 -0
- triton/experimental/gluon/language/extra/__init__.py +3 -0
- triton/experimental/gluon/language/nvidia/__init__.py +4 -0
- triton/experimental/gluon/language/nvidia/ampere/__init__.py +3 -0
- triton/experimental/gluon/language/nvidia/ampere/async_copy.py +74 -0
- triton/experimental/gluon/language/nvidia/ampere/mbarrier.py +80 -0
- triton/experimental/gluon/language/nvidia/blackwell/__init__.py +387 -0
- triton/experimental/gluon/language/nvidia/blackwell/tma.py +52 -0
- triton/experimental/gluon/language/nvidia/hopper/__init__.py +132 -0
- triton/experimental/gluon/language/nvidia/hopper/mbarrier.py +34 -0
- triton/experimental/gluon/language/nvidia/hopper/tma.py +97 -0
- triton/experimental/gluon/nvidia/__init__.py +4 -0
- triton/experimental/gluon/nvidia/blackwell.py +3 -0
- triton/experimental/gluon/nvidia/hopper.py +45 -0
- triton/knobs.py +546 -0
- triton/language/__init__.py +342 -0
- triton/language/core.py +3405 -0
- triton/language/extra/__init__.py +26 -0
- triton/language/extra/cuda/__init__.py +16 -0
- triton/language/extra/cuda/gdc.py +42 -0
- triton/language/extra/cuda/libdevice.py +1629 -0
- triton/language/extra/cuda/utils.py +109 -0
- triton/language/extra/hip/__init__.py +5 -0
- triton/language/extra/hip/libdevice.py +491 -0
- triton/language/extra/hip/utils.py +35 -0
- triton/language/extra/libdevice.py +790 -0
- triton/language/math.py +249 -0
- triton/language/random.py +218 -0
- triton/language/semantic.py +1939 -0
- triton/language/standard.py +534 -0
- triton/language/target_info.py +54 -0
- triton/runtime/__init__.py +23 -0
- triton/runtime/_allocation.py +44 -0
- triton/runtime/_async_compile.py +55 -0
- triton/runtime/autotuner.py +476 -0
- triton/runtime/build.py +168 -0
- triton/runtime/cache.py +317 -0
- triton/runtime/driver.py +38 -0
- triton/runtime/errors.py +36 -0
- triton/runtime/interpreter.py +1414 -0
- triton/runtime/jit.py +1107 -0
- triton/runtime/tcc/include/_mingw.h +168 -0
- triton/runtime/tcc/include/assert.h +62 -0
- triton/runtime/tcc/include/conio.h +409 -0
- triton/runtime/tcc/include/ctype.h +281 -0
- triton/runtime/tcc/include/dir.h +31 -0
- triton/runtime/tcc/include/direct.h +68 -0
- triton/runtime/tcc/include/dirent.h +135 -0
- triton/runtime/tcc/include/dos.h +55 -0
- triton/runtime/tcc/include/errno.h +75 -0
- triton/runtime/tcc/include/excpt.h +123 -0
- triton/runtime/tcc/include/fcntl.h +52 -0
- triton/runtime/tcc/include/fenv.h +108 -0
- triton/runtime/tcc/include/float.h +75 -0
- triton/runtime/tcc/include/inttypes.h +297 -0
- triton/runtime/tcc/include/io.h +418 -0
- triton/runtime/tcc/include/iso646.h +36 -0
- triton/runtime/tcc/include/limits.h +116 -0
- triton/runtime/tcc/include/locale.h +91 -0
- triton/runtime/tcc/include/malloc.h +181 -0
- triton/runtime/tcc/include/math.h +497 -0
- triton/runtime/tcc/include/mem.h +13 -0
- triton/runtime/tcc/include/memory.h +40 -0
- triton/runtime/tcc/include/process.h +176 -0
- triton/runtime/tcc/include/sec_api/conio_s.h +42 -0
- triton/runtime/tcc/include/sec_api/crtdbg_s.h +19 -0
- triton/runtime/tcc/include/sec_api/io_s.h +33 -0
- triton/runtime/tcc/include/sec_api/mbstring_s.h +52 -0
- triton/runtime/tcc/include/sec_api/search_s.h +25 -0
- triton/runtime/tcc/include/sec_api/stdio_s.h +145 -0
- triton/runtime/tcc/include/sec_api/stdlib_s.h +67 -0
- triton/runtime/tcc/include/sec_api/stralign_s.h +30 -0
- triton/runtime/tcc/include/sec_api/string_s.h +41 -0
- triton/runtime/tcc/include/sec_api/sys/timeb_s.h +34 -0
- triton/runtime/tcc/include/sec_api/tchar_s.h +266 -0
- triton/runtime/tcc/include/sec_api/time_s.h +61 -0
- triton/runtime/tcc/include/sec_api/wchar_s.h +128 -0
- triton/runtime/tcc/include/setjmp.h +160 -0
- triton/runtime/tcc/include/share.h +28 -0
- triton/runtime/tcc/include/signal.h +63 -0
- triton/runtime/tcc/include/stdalign.h +16 -0
- triton/runtime/tcc/include/stdarg.h +14 -0
- triton/runtime/tcc/include/stdatomic.h +171 -0
- triton/runtime/tcc/include/stdbool.h +11 -0
- triton/runtime/tcc/include/stddef.h +42 -0
- triton/runtime/tcc/include/stdint.h +212 -0
- triton/runtime/tcc/include/stdio.h +429 -0
- triton/runtime/tcc/include/stdlib.h +591 -0
- triton/runtime/tcc/include/stdnoreturn.h +7 -0
- triton/runtime/tcc/include/string.h +164 -0
- triton/runtime/tcc/include/sys/fcntl.h +13 -0
- triton/runtime/tcc/include/sys/file.h +14 -0
- triton/runtime/tcc/include/sys/locking.h +30 -0
- triton/runtime/tcc/include/sys/stat.h +290 -0
- triton/runtime/tcc/include/sys/time.h +69 -0
- triton/runtime/tcc/include/sys/timeb.h +133 -0
- triton/runtime/tcc/include/sys/types.h +123 -0
- triton/runtime/tcc/include/sys/unistd.h +14 -0
- triton/runtime/tcc/include/sys/utime.h +146 -0
- triton/runtime/tcc/include/tcc/tcc_libm.h +618 -0
- triton/runtime/tcc/include/tccdefs.h +342 -0
- triton/runtime/tcc/include/tcclib.h +80 -0
- triton/runtime/tcc/include/tchar.h +1102 -0
- triton/runtime/tcc/include/tgmath.h +89 -0
- triton/runtime/tcc/include/time.h +287 -0
- triton/runtime/tcc/include/uchar.h +33 -0
- triton/runtime/tcc/include/unistd.h +1 -0
- triton/runtime/tcc/include/vadefs.h +11 -0
- triton/runtime/tcc/include/values.h +4 -0
- triton/runtime/tcc/include/varargs.h +12 -0
- triton/runtime/tcc/include/wchar.h +873 -0
- triton/runtime/tcc/include/wctype.h +172 -0
- triton/runtime/tcc/include/winapi/basetsd.h +149 -0
- triton/runtime/tcc/include/winapi/basetyps.h +85 -0
- triton/runtime/tcc/include/winapi/guiddef.h +156 -0
- triton/runtime/tcc/include/winapi/poppack.h +8 -0
- triton/runtime/tcc/include/winapi/pshpack1.h +8 -0
- triton/runtime/tcc/include/winapi/pshpack2.h +8 -0
- triton/runtime/tcc/include/winapi/pshpack4.h +8 -0
- triton/runtime/tcc/include/winapi/pshpack8.h +8 -0
- triton/runtime/tcc/include/winapi/qos.h +72 -0
- triton/runtime/tcc/include/winapi/shellapi.h +59 -0
- triton/runtime/tcc/include/winapi/winbase.h +2958 -0
- triton/runtime/tcc/include/winapi/wincon.h +309 -0
- triton/runtime/tcc/include/winapi/windef.h +293 -0
- triton/runtime/tcc/include/winapi/windows.h +127 -0
- triton/runtime/tcc/include/winapi/winerror.h +3166 -0
- triton/runtime/tcc/include/winapi/wingdi.h +4080 -0
- triton/runtime/tcc/include/winapi/winnls.h +778 -0
- triton/runtime/tcc/include/winapi/winnt.h +5837 -0
- triton/runtime/tcc/include/winapi/winreg.h +272 -0
- triton/runtime/tcc/include/winapi/winsock2.h +1474 -0
- triton/runtime/tcc/include/winapi/winuser.h +5651 -0
- triton/runtime/tcc/include/winapi/winver.h +160 -0
- triton/runtime/tcc/include/winapi/ws2ipdef.h +21 -0
- triton/runtime/tcc/include/winapi/ws2tcpip.h +391 -0
- triton/runtime/tcc/lib/cuda.def +697 -0
- triton/runtime/tcc/lib/gdi32.def +337 -0
- triton/runtime/tcc/lib/kernel32.def +770 -0
- triton/runtime/tcc/lib/libtcc1.a +0 -0
- triton/runtime/tcc/lib/msvcrt.def +1399 -0
- triton/runtime/tcc/lib/python3.def +810 -0
- triton/runtime/tcc/lib/python310.def +1610 -0
- triton/runtime/tcc/lib/python311.def +1633 -0
- triton/runtime/tcc/lib/python312.def +1703 -0
- triton/runtime/tcc/lib/python313.def +1651 -0
- triton/runtime/tcc/lib/python313t.def +1656 -0
- triton/runtime/tcc/lib/python314.def +1800 -0
- triton/runtime/tcc/lib/python314t.def +1809 -0
- triton/runtime/tcc/lib/python39.def +1644 -0
- triton/runtime/tcc/lib/python3t.def +905 -0
- triton/runtime/tcc/lib/user32.def +658 -0
- triton/runtime/tcc/libtcc.dll +0 -0
- triton/runtime/tcc/tcc.exe +0 -0
- triton/testing.py +543 -0
- triton/tools/__init__.py +0 -0
- triton/tools/build_extern.py +365 -0
- triton/tools/compile.py +210 -0
- triton/tools/disasm.py +143 -0
- triton/tools/extra/cuda/compile.c +70 -0
- triton/tools/extra/cuda/compile.h +14 -0
- triton/tools/extra/hip/compile.cpp +66 -0
- triton/tools/extra/hip/compile.h +13 -0
- triton/tools/link.py +322 -0
- triton/tools/mxfp.py +301 -0
- triton/tools/ragged_tma.py +92 -0
- triton/tools/tensor_descriptor.py +34 -0
- triton/windows_utils.py +405 -0
- triton_windows-3.5.1.post21.dist-info/METADATA +46 -0
- triton_windows-3.5.1.post21.dist-info/RECORD +217 -0
- triton_windows-3.5.1.post21.dist-info/WHEEL +5 -0
- triton_windows-3.5.1.post21.dist-info/entry_points.txt +3 -0
- triton_windows-3.5.1.post21.dist-info/licenses/LICENSE +23 -0
- triton_windows-3.5.1.post21.dist-info/top_level.txt +1 -0
triton/backends/amd/compiler.py
@@ -0,0 +1,461 @@
from triton.backends.compiler import BaseBackend, GPUTarget, Language
from triton._C.libtriton import ir, passes, llvm, amd
from triton import knobs
from dataclasses import dataclass
from typing import Any, Dict, Tuple
from types import ModuleType
import hashlib
import tempfile
import re
import functools
import warnings
from pathlib import Path


def get_min_dot_size(target: GPUTarget):
    # We fall back to FMA and cast arguments if certain configurations are
    # not supported natively by matrix core units.
    return lambda lhs_type, rhs_type: (1, 1, 1)


def is_pingpong_schedule_enabled(arch, use_async_copy):
    return (arch == "gfx942" or (arch == "gfx950" and use_async_copy is True)
            ) if knobs.amd.use_block_pingpong is None else knobs.amd.use_block_pingpong


def is_in_thread_transpose_enabled(arch):
    return (arch == "gfx942") if knobs.amd.use_in_thread_transpose is None else knobs.amd.use_in_thread_transpose

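# Illustrative sketch (not part of compiler.py): the two helpers above follow a
# tri-state knob pattern, where an unset knob (None) defers to an arch-derived
# default and an explicit True/False always wins. A minimal standalone rendition:
#
#     def _resolve_knob(knob, arch_default):
#         # None means "unset": fall back to the per-architecture default.
#         return arch_default if knob is None else knob
#
#     assert _resolve_knob(None, True) is True     # unset knob: arch default
#     assert _resolve_knob(False, True) is False   # explicit override wins
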
@dataclass(frozen=True)
class HIPOptions:
    num_warps: int = 4
    waves_per_eu: int = 1
    num_stages: int = 2
    num_ctas: int = 1
    extern_libs: dict = None
    cluster_dims: tuple = (1, 1, 1)
    debug: bool = False
    sanitize_overflow: bool = True
    arch: str = None
    # We have native support for OCP fp8 variants since CDNA4/RDNA4. For earlier
    # generations, we software-emulate support for them.
    # UZ fp8 variants (fp8e4b8 and fp8e5b16) are natively supported on CDNA3. For
    # other architectures they are software-emulated.
    supported_fp8_dtypes: Tuple[str] = ("fp8e4nv", "fp8e5", "fp8e5b16", "fp8e4b8")
    deprecated_fp8_dot_operand_dtypes: Tuple[str] = ()
    default_dot_input_precision: str = "ieee"
    allowed_dot_input_precisions: Tuple[str] = ("ieee", )
    enable_fp_fusion: bool = True
    launch_cooperative_grid: bool = False
    matrix_instr_nonkdim: int = 0
    kpack: int = 1
    allow_flush_denorm: bool = False
    max_num_imprecise_acc_default: int = 0
    backend_name: str = 'hip'
    instrumentation_mode: str = ""

    # The following option provides hints to the AMDGPU backend regarding
    # instruction scheduling for all `tt.dot` operations in a kernel. The "none"
    # variant preserves the backend's default instruction scheduling, which aims
    # at maximizing occupancy. The option is experimental: its semantics may
    # change at any time, and it may be removed entirely.
    #
    # Current experimental scheduling variants:
    #
    # attention: enables a set of optimizations for attention kernels, including:
    #   - iglp 2 and sched.barrier around it
    #   - the sink-insts-to-avoid-spills flag to avoid register spills
    schedule_hint: str = 'none'

    def __post_init__(self):
        gfx_major = int(self.arch[3:-2])  # Drop "gfx" prefix and minor/patch number
        warp_size = 32 if gfx_major >= 10 else 64
        object.__setattr__(self, 'warp_size', warp_size)
        assert self.num_warps > 0 and (self.num_warps & (self.num_warps - 1)) == 0, \
               "num_warps must be a power of 2"

        if (self.arch == 'gfx950') and (self.kpack != 1):
            warnings.warn(
                f"kpack is deprecated starting from gfx950 and will be removed in later releases. So for now kpack = {self.kpack} will be overwritten to 1 to make transitioning easier."
            )
            object.__setattr__(self, 'kpack', 1)

        default_libdir = Path(__file__).parent / 'lib'
        extern_libs = {} if self.extern_libs is None else dict(self.extern_libs)
        for lib in ["ocml", "ockl"]:
            extern_libs[lib] = str(default_libdir / f'{lib}.bc')
        object.__setattr__(self, 'extern_libs', tuple(extern_libs.items()))

    def hash(self):
        key = '_'.join([f'{name}-{val}' for name, val in self.__dict__.items()])
        return hashlib.sha256(key.encode("utf-8")).hexdigest()

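# Illustrative sketch (not part of compiler.py): the arch-string arithmetic in
# __post_init__ above, as standalone code. Dropping the "gfx" prefix and the
# two minor/patch characters yields the major ISA version, which selects
# wave32 on gfx10+ (RDNA) and wave64 on older/CDNA targets:
#
#     def _warp_size_for(arch: str) -> int:
#         gfx_major = int(arch[3:-2])  # "gfx942" -> 9, "gfx1100" -> 11
#         return 32 if gfx_major >= 10 else 64
#
#     assert _warp_size_for("gfx942") == 64    # CDNA3 uses wave64
#     assert _warp_size_for("gfx1100") == 32   # RDNA3 uses wave32
#     # num_warps must be a nonzero power of two (same bit trick as the assert):
#     assert all(n > 0 and (n & (n - 1)) == 0 for n in (1, 2, 4, 8))
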
class HIPBackend(BaseBackend):
    instrumentation = None

    @staticmethod
    def supports_target(target: GPUTarget):
        return target.backend == 'hip'

    def __init__(self, target: GPUTarget) -> None:
        super().__init__(target)
        assert isinstance(target.arch, str)
        self.binary_ext = "hsaco"

    def get_target_name(self, options) -> str:
        return f"hip:{options.arch}"

    def parse_options(self, opts) -> Any:
        args = {'arch': knobs.runtime.override_arch or self.target.arch}

        if opts.get("num_ctas", 1) > 1:
            raise ValueError("num_ctas > 1 not supported for AMD GPUs")

        # Enable XF32 (TF32) for CDNA3 GPUs
        if self.target.arch == 'gfx942':
            allowed_dot_input_precisions = set(HIPOptions.allowed_dot_input_precisions)
            allowed_dot_input_precisions.update({'tf32'})
            args["allowed_dot_input_precisions"] = tuple(sorted(allowed_dot_input_precisions))

        if "supported_fp8_dtypes" not in opts:
            args["supported_fp8_dtypes"] = tuple(sorted(HIPOptions.supported_fp8_dtypes))

        if self.target.arch == 'gfx950':
            deprecated_fp8_dot_operand_dtypes = set(HIPOptions.deprecated_fp8_dot_operand_dtypes)
            deprecated_fp8_dot_operand_dtypes.update({"fp8e5b16", "fp8e4b8"})
            args["deprecated_fp8_dot_operand_dtypes"] = tuple(sorted(deprecated_fp8_dot_operand_dtypes))

        if "enable_fp_fusion" not in opts:
            args["enable_fp_fusion"] = knobs.language.default_fp_fusion
        args.update({k: opts[k] for k in HIPOptions.__dataclass_fields__.keys() \
                     if k in opts and opts[k] is not None})
        return HIPOptions(**args)

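    # Illustrative sketch (not part of compiler.py): parse_options layers three
    # sources of option values. Dataclass defaults sit at the bottom, per-arch
    # adjustments computed into `args` come next, and caller-supplied opts win
    # last, with None treated as "unset". A minimal dict-based rendition:
    #
    #     defaults = {"num_warps": 4, "kpack": 1}
    #     arch_args = {"allowed_dot_input_precisions": ("ieee", "tf32")}  # e.g. gfx942
    #     user_opts = {"num_warps": 8, "kpack": None}  # None is ignored
    #
    #     merged = {**defaults, **arch_args,
    #               **{k: v for k, v in user_opts.items() if v is not None}}
    #     assert merged["num_warps"] == 8 and merged["kpack"] == 1
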
    def pack_metadata(self, metadata):
        return (
            metadata.num_warps,
            metadata.num_ctas,
            metadata.shared,
            metadata.cluster_dims[0],
            metadata.cluster_dims[1],
            metadata.cluster_dims[2],
        )

    def get_codegen_implementation(self, options):
        return {"min_dot_size": get_min_dot_size(self.target)}

    def get_module_map(self) -> Dict[str, ModuleType]:
        from triton.language.extra.hip import libdevice

        return {"triton.language.extra.libdevice": libdevice}

    def load_dialects(self, ctx):
        amd.load_dialects(ctx)
        if HIPBackend.instrumentation:
            HIPBackend.instrumentation.load_dialects(ctx)

    @staticmethod
    def is_within_2gb(arg):
        import torch

        MAX_INT_32 = 2**31 - 1
        if hasattr(arg, "ptr_range"):
            return arg.ptr_range() <= MAX_INT_32
        if isinstance(arg, torch.Tensor) and hasattr(arg, "untyped_storage"):
            return arg.untyped_storage().size() <= MAX_INT_32
        return False

    @staticmethod
    def parse_attr(desc):
        ret = BaseBackend.parse_attr(desc)
        if "S" in desc:
            ret += [["tt.pointer_range", 32]]
        return ret

    @staticmethod
    def get_arg_specialization(arg, ty, **kwargs):
        ret = BaseBackend.get_arg_specialization(arg, ty, **kwargs)
        # Only attempt buffer-ops specialization if buffer ops are enabled;
        # otherwise the is_within_2gb check is unnecessary overhead.
        if knobs.amd.use_buffer_ops and ty == "tensor" and HIPBackend.is_within_2gb(arg):
            ret += "S"
        return ret

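    # Illustrative sketch (not part of compiler.py): the "S" specialization
    # above marks tensor arguments whose backing storage provably fits a signed
    # 32-bit offset, matching the ["tt.pointer_range", 32] attribute emitted by
    # parse_attr. A standalone rendition of the size check with a stand-in
    # argument object (the real check also accepts torch.Tensor):
    #
    #     MAX_INT_32 = 2**31 - 1
    #
    #     class FakeArg:  # hypothetical object exposing ptr_range()
    #         def __init__(self, nbytes):
    #             self.nbytes = nbytes
    #         def ptr_range(self):
    #             return self.nbytes
    #
    #     assert FakeArg(1 << 20).ptr_range() <= MAX_INT_32      # 1 MiB: specialize
    #     assert not FakeArg(1 << 32).ptr_range() <= MAX_INT_32  # 4 GiB: skip
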
    @staticmethod
    def make_ttir(mod, metadata, options):
        pm = ir.pass_manager(mod.context)
        pm.enable_debug()
        passes.common.add_inliner(pm)
        passes.ttir.add_rewrite_tensor_pointer(pm)
        passes.ttir.add_rewrite_tensor_descriptor_to_pointer(pm)
        passes.common.add_canonicalizer(pm)
        passes.ttir.add_combine(pm)
        passes.ttir.add_reorder_broadcast(pm)
        passes.common.add_cse(pm)
        passes.ttir.add_triton_licm(pm)
        passes.common.add_symbol_dce(pm)
        passes.ttir.add_loop_unroll(pm)
        pm.run(mod)
        return mod

    @staticmethod
    def make_ttgir(mod, metadata, options):
        pm = ir.pass_manager(mod.context)
        pm.enable_debug()
        passes.ttir.add_convert_to_ttgpuir(pm, f"hip:{options.arch}", options.num_warps, options.warp_size,
                                           options.num_ctas)
        pm.run(mod)
        pm = ir.pass_manager(mod.context)
        pm.enable_debug()
        passes.ttgpuir.add_coalesce(pm)
        passes.ttgpuir.add_remove_layout_conversions(pm)
        passes.ttgpuir.add_optimize_thread_locality(pm)
        amd.passes.ttgpuir.add_accelerate_matmul(pm, options.arch, options.matrix_instr_nonkdim, options.kpack)
        passes.ttgpuir.add_remove_layout_conversions(pm)
        amd.passes.ttgpuir.add_optimize_epilogue(pm)
        passes.ttgpuir.add_optimize_dot_operands(pm, True)
        amd.passes.ttgpuir.add_hoist_layout_conversions(pm)

        passes.ttgpuir.add_fuse_nested_loops(pm)
        passes.common.add_canonicalizer(pm)
        passes.ttir.add_triton_licm(pm)
        passes.common.add_canonicalizer(pm)

        global_prefetch = knobs.amd.global_prefetch
        local_prefetch = knobs.amd.local_prefetch
        use_async_copy = knobs.amd.use_async_copy
        use_block_pingpong = is_pingpong_schedule_enabled(options.arch, use_async_copy)

        amd.passes.ttgpuir.add_stream_pipeline(pm, options.num_stages, global_prefetch, local_prefetch, use_async_copy,
                                               use_block_pingpong)
        if use_async_copy:
            amd.passes.ttgpuir.add_coalesce_async_copy(pm, options.arch)
        passes.common.add_canonicalizer(pm)
        if options.schedule_hint.lower() != "none":
            amd.passes.ttgpuir.insert_instruction_sched_hints(pm, options.schedule_hint)
        passes.ttgpuir.add_optimize_dot_operands(pm, True)
        passes.ttgpuir.add_remove_layout_conversions(pm)
        passes.ttgpuir.add_reduce_data_duplication(pm)
        if is_in_thread_transpose_enabled(options.arch):
            amd.passes.ttgpuir.add_in_thread_transpose(pm)
            passes.ttgpuir.add_remove_layout_conversions(pm)
        amd.passes.ttgpuir.add_reorder_instructions(pm)
        if use_block_pingpong and options.num_stages > 1:
            amd.passes.ttgpuir.add_block_pingpong(pm, options.num_stages)

        if knobs.amd.use_buffer_ops:
            amd.passes.ttgpuir.add_canonicalize_pointers(pm)
            passes.common.add_canonicalizer(pm)
            amd.passes.ttgpuir.add_convert_to_buffer_ops(pm, options.arch, knobs.amd.use_buffer_atomics)

        amd.passes.ttgpuir.add_fold_true_cmpi(pm)
        passes.common.add_canonicalizer(pm)
        passes.common.add_cse(pm)
        passes.common.add_symbol_dce(pm)
        if use_async_copy:
            amd.passes.ttgpuir.add_update_async_wait_count(pm, options.arch)
        pm.run(mod)
        return mod

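    # Illustrative sketch (not part of compiler.py): make_ttgir builds its
    # pipeline imperatively, gating individual passes on knobs and derived
    # flags, then runs it once. The control flow reduces to this pattern:
    #
    #     class FakePassManager:  # hypothetical stand-in for ir.pass_manager
    #         def __init__(self):
    #             self.pipeline = []
    #         def add(self, name):
    #             self.pipeline.append(name)
    #
    #     pm = FakePassManager()
    #     pm.add("coalesce")
    #     use_async_copy = True  # would come from knobs.amd.use_async_copy
    #     if use_async_copy:
    #         pm.add("coalesce-async-copy")
    #     pm.add("reorder-instructions")
    #     assert pm.pipeline == ["coalesce", "coalesce-async-copy", "reorder-instructions"]
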
    @staticmethod
    def gluon_to_ttgir(src, metadata, options):
        mod = src
        pm = ir.pass_manager(mod.context)
        pm.enable_debug()

        passes.gluon.add_inliner(pm)
        passes.gluon.add_resolve_auto_encodings(pm)
        passes.common.add_sccp(pm)
        passes.ttir.add_loop_aware_cse(pm)
        passes.gluon.add_canonicalizer(pm)
        passes.ttgpuir.add_combine_tensor_select_and_if(pm)

        pm.run(mod)
        return mod

    @staticmethod
    def make_llir(src, metadata, options):
        mod = src
        # TritonGPU -> LLVM-IR (MLIR)
        pm = ir.pass_manager(mod.context)
        pm.enable_debug()
        # custom_lds_size is an experimental parameter that defines the amount of
        # LDS available to one thread block. Measured in bytes.
        #
        # If custom_lds_size = 0, the pass assumes all LDS is available to one
        # thread block; the LDS size is determined by the provided arch name.
        custom_lds_size = 0
        amd.passes.ttgpuir.add_optimize_lds_usage(pm, options.arch, custom_lds_size)
        passes.convert.add_scf_to_cf(pm)
        passes.convert.add_index_to_llvmir(pm)

        amd.passes.ttgpuir.add_allocate_shared_memory(pm)
        # Instrumentation point here so we can override the IRs above (e.g., ttir and ttgir)
        if HIPBackend.instrumentation:
            HIPBackend.instrumentation.patch("ttgpuir_to_llvmir", pm, mod.context)
        ## __HIP_FTZ controls the denorm flushing behavior of the exp2 op as follows:
        ##   1. If __HIP_FTZ = 1, exp2 flushes denorms in input and output regardless
        ##      of the value of the kernel arg `allow_flush_denorm`.
        ##   2. If __HIP_FTZ = 0, whether exp2 flushes denorms in input and output
        ##      depends on the value of the kernel arg `allow_flush_denorm`.
        ##   3. __HIP_FTZ defaults to 1 and is not exposed as a kernel argument.
        ##      For now it is used as a controller for developers only.
        __HIP_FTZ = True
        amd.passes.ttgpuir.add_to_llvmir(pm, options.arch, __HIP_FTZ)
        passes.common.add_canonicalizer(pm)
        passes.common.add_cse(pm)

        passes.convert.add_cf_to_llvmir(pm)
        passes.convert.add_arith_to_llvmir(pm)
        passes.common.add_canonicalizer(pm)
        passes.common.add_cse(pm)
        passes.common.add_symbol_dce(pm)

        if options.schedule_hint.lower() != "none":
            amd.passes.ttgpuir.lower_instruction_sched_hints(pm, options.arch, options.num_stages)

        # This cannot be moved below the di_scope pass
        if HIPBackend.instrumentation:
            HIPBackend.instrumentation.patch("llvmir_to_llvm", pm, mod.context)

        if not knobs.compilation.disable_line_info:
            passes.llvmir.add_di_scope(pm)

        amd.passes.ttgpuir.add_builtin_func_to_llvmir(pm, __HIP_FTZ)
        pm.run(mod)

        # LLVM-IR (MLIR) -> LLVM-IR (LLVM)
        llvm.init_targets()
        context = llvm.context()
        llvm_mod = llvm.to_module(mod, context)
        amd.attach_target_triple(llvm_mod)
        target_features = ''
        if knobs.compilation.enable_asan:
            target_features = '+xnack'
        llvm.attach_datalayout(llvm_mod, amd.TARGET_TRIPLE, options.arch, target_features)

        # Set various control constants on the LLVM module so that device
        # libraries can resolve references to them.
        amd.set_isa_version(llvm_mod, options.arch)
        amd.set_abi_version(llvm_mod, 500)
        amd.set_bool_control_constant(llvm_mod, "__oclc_finite_only_opt", False)
        amd.set_bool_control_constant(llvm_mod, "__oclc_correctly_rounded_sqrt32", True)
        amd.set_bool_control_constant(llvm_mod, "__oclc_unsafe_math_opt", False)
        amd.set_bool_control_constant(llvm_mod, "__oclc_wavefrontsize64", options.warp_size == 64)

        # Set kernel attributes first, given this may affect later optimizations.
        fns = [fn for fn in llvm_mod.get_functions() if not fn.is_declaration()]
        # The public kernel should be kernel 0.
        fns[0].set_calling_conv(amd.CALLING_CONV_AMDGPU_KERNEL)
        fns[0].add_fn_attr("amdgpu-flat-work-group-size", f"1,{options.num_warps*options.warp_size}")
        # The LLVM AMDGPU backend supports the attribute "amdgpu-waves-per-eu"="<min>[, <max>]".
        # This attribute may be attached to a kernel function definition and is an optimization hint.
        # The <min> parameter specifies the requested minimum number of waves per EU, and the optional
        # <max> parameter specifies the requested maximum number of waves per EU (must be greater than
        # <min> if specified). If <max> is omitted, there is no restriction on the maximum number of
        # waves per EU other than the one dictated by the hardware for which the kernel is compiled.
        # Passing 0, 0 as <min>, <max> implies the default behavior (no limits).
        fns[0].add_fn_attr("amdgpu-waves-per-eu", f"{options.waves_per_eu}")
        denormal_mode = "preserve-sign" if options.allow_flush_denorm else "ieee"
        fns[0].add_fn_attr("denormal-fp-math-f32", denormal_mode)
        if knobs.compilation.enable_asan:
            fns[0].add_fn_target_feature("+xnack")
            fns[0].add_fn_asan_attr()

        # Hint the compiler that we'd like the firmware to set the kernel arguments
        # to user SGPRs so that the kernel does not need to s_load its arguments
        # from memory.
        amd.set_all_fn_arg_inreg(fns[0])

        if knobs.compilation.enable_asan:
            default_libdir = Path(__file__).parent / 'lib'
            paths = [
                str(default_libdir / 'asanrtl.bc'),
                str(default_libdir / "ocml.bc"),
                str(default_libdir / "ockl.bc")
            ]
            llvm.link_extern_libs(llvm_mod, paths)
        elif options.extern_libs:
            paths = [path for (name, path) in options.extern_libs if amd.need_extern_lib(llvm_mod, name)]
            if len(paths) > 0:
                llvm.link_extern_libs(llvm_mod, paths)

        llvm.optimize_module(llvm_mod, llvm.OPTIMIZE_O3, options.arch, '', [], options.enable_fp_fusion)

        # Architectures with architected SGPRs store the workgroup id in ttmp9 (X) and ttmp7 (Y[15:0], Z[31:16]).
        # These attributes are used to determine if Z should be masked out when loading Y. They are inferred during
        # optimize_module from calls to @llvm.amdgcn.workgroup.id.x/y/z(). We cannot rely on this because a
        # dispatch dimension might be used even if there is no program_id() call for it.
        if amd.has_architected_sgprs(options.arch):
            fns[0].remove_fn_attr("amdgpu-no-workgroup-id-x")
            fns[0].remove_fn_attr("amdgpu-no-workgroup-id-y")
            fns[0].remove_fn_attr("amdgpu-no-workgroup-id-z")

        if knobs.amd.scalarize_packed_fops:
            amd.add_scalarize_packed_fops_llvm_pass(fns[0])

        # Get some metadata
        metadata["shared"] = src.get_int_attr("ttg.shared")
        metadata["profile_scratch_size"] = src.get_int_attr("ttg.profile_scratch_memory_size") or 0
        metadata["profile_scratch_align"] = src.get_int_attr("ttg.profile_scratch_memory_alignment") or 1

        amd.cleanup_bitcode_metadata(llvm_mod)
        # Disable inlining of print-related functions, because inlining these
        # functions could slow down compilation significantly.
        amd.disable_print_inline(llvm_mod)
        return str(llvm_mod)

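    # Illustrative sketch (not part of compiler.py): the kernel-attribute
    # strings attached in make_llir above, computed standalone. The flat
    # work-group-size upper bound is num_warps * warp_size, and waves_per_eu is
    # emitted as the <min> of "amdgpu-waves-per-eu"="<min>[, <max>]" with <max>
    # left open:
    #
    #     num_warps, warp_size, waves_per_eu = 4, 64, 1
    #     assert f"1,{num_warps * warp_size}" == "1,256"  # 4 wave64 warps
    #     assert f"{waves_per_eu}" == "1"                 # only <min> is emitted
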
    @staticmethod
    def make_amdgcn(src, metadata, options):
        # Find kernel names (there should only be one).
        # We get the name at the last possible step to accommodate `triton.compile`
        # on user-provided LLVM.
        names = re.findall(r"define amdgpu_kernel void @([a-zA-Z_][a-zA-Z0-9_]*)", src)
        assert len(names) == 1
        metadata["name"] = names[0]
        # llvm -> hsaco
        flags = []
        # The sink-insts-to-avoid-spills flag asks the LLVM backend to sink
        # instructions into loops to avoid register spills in the MachineSinking
        # pass. It can also cause regressions in some cases, but from current
        # observation the regressions are not significant. It would be better to
        # have some heuristics.
        if options.schedule_hint == 'attention':
            flags.append('sink-insts-to-avoid-spills')
        features = '-real-true16' if 'gfx11' in options.arch else ''
        amdgcn = llvm.translate_to_asm(src, amd.TARGET_TRIPLE, options.arch, features, flags, options.enable_fp_fusion,
                                       False)
        if knobs.amd.dump_amdgcn:
            print("// -----// AMDGCN Dump //----- //")
            print(amdgcn)
        return amdgcn

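    # Standalone check (illustrative, not part of compiler.py) of the
    # kernel-name regex used in make_amdgcn above, applied to a minimal
    # fragment of AMDGPU LLVM IR:
    #
    #     import re
    #     ir_text = "define amdgpu_kernel void @add_kernel(ptr %x) { ret void }"
    #     names = re.findall(r"define amdgpu_kernel void @([a-zA-Z_][a-zA-Z0-9_]*)", ir_text)
    #     assert names == ["add_kernel"]
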
    @staticmethod
    def make_hsaco(src, metadata, options):
        target_features = ''
        if knobs.compilation.enable_asan:
            target_features = '+xnack'
        hsaco = amd.assemble_amdgcn(src, options.arch, target_features)
        with tempfile.NamedTemporaryFile() as tmp_out:
            with tempfile.NamedTemporaryFile() as tmp_in:
                with open(tmp_in.name, "wb") as fd_in:
                    fd_in.write(hsaco)
                amd.link_hsaco(tmp_in.name, tmp_out.name)
            with open(tmp_out.name, "rb") as fd_out:
                ret = fd_out.read()
        return ret

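    # Illustrative sketch (not part of compiler.py): the temp-file round trip
    # in make_hsaco above, with a hypothetical _link standing in for
    # amd.link_hsaco. delete=False keeps the sketch portable, since reopening a
    # still-open NamedTemporaryFile by name is platform-sensitive:
    #
    #     import shutil, tempfile
    #
    #     def _link(src_path, dst_path):  # stand-in; a real linker relocates here
    #         shutil.copyfile(src_path, dst_path)
    #
    #     tmp_in = tempfile.NamedTemporaryFile(delete=False)
    #     tmp_out = tempfile.NamedTemporaryFile(delete=False)
    #     tmp_in.write(b"\x7fELF...")  # pretend: unlinked code-object bytes
    #     tmp_in.close(); tmp_out.close()
    #     _link(tmp_in.name, tmp_out.name)
    #     with open(tmp_out.name, "rb") as fd:
    #         assert fd.read().startswith(b"\x7fELF")
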
def add_stages(self, stages, options, language):
|
|
450
|
+
if language == Language.TRITON:
|
|
451
|
+
stages["ttir"] = lambda src, metadata: self.make_ttir(src, metadata, options)
|
|
452
|
+
stages["ttgir"] = lambda src, metadata: self.make_ttgir(src, metadata, options)
|
|
453
|
+
elif language == Language.GLUON:
|
|
454
|
+
stages["ttgir"] = lambda src, metadata: self.gluon_to_ttgir(src, metadata, options)
|
|
455
|
+
stages["llir"] = lambda src, metadata: self.make_llir(src, metadata, options)
|
|
456
|
+
stages["amdgcn"] = lambda src, metadata: self.make_amdgcn(src, metadata, options)
|
|
457
|
+
stages["hsaco"] = lambda src, metadata: self.make_hsaco(src, metadata, options)
|
|
458
|
+
|
|
459
|
+
@functools.lru_cache()
|
|
460
|
+
def hash(self):
|
|
461
|
+
return f'{self.target}'
|
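
# Illustrative sketch (not part of compiler.py): how a stages dict populated by
# add_stages above is consumed. A simplified driver threads a single artifact
# through the stage callables in registration order, passing along a shared
# metadata dict:
#
#     metadata = {}
#     stages = {
#         "ttir": lambda src, md: src + "->ttir",
#         "ttgir": lambda src, md: src + "->ttgir",
#         "llir": lambda src, md: src + "->llir",
#         "amdgcn": lambda src, md: src + "->amdgcn",
#         "hsaco": lambda src, md: src + "->hsaco",
#     }
#     artifact = "ast"
#     for fn in stages.values():
#         artifact = fn(artifact, metadata)
#     assert artifact == "ast->ttir->ttgir->llir->amdgcn->hsaco"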