cuequivariance-ops-cu12 0.6.0__py3-none-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of cuequivariance-ops-cu12 might be problematic.

Files changed (37)
  1. cuequivariance_ops/VERSION +1 -0
  2. cuequivariance_ops/__init__.py +42 -0
  3. cuequivariance_ops/_version.py +20 -0
  4. cuequivariance_ops/common/common.hpp +98 -0
  5. cuequivariance_ops/common/nvtx.hpp +29 -0
  6. cuequivariance_ops/equivariance/batch_dimension.hh +15 -0
  7. cuequivariance_ops/equivariance/dtypes.hh +65 -0
  8. cuequivariance_ops/equivariance/fused_tensor_product.cuh +297 -0
  9. cuequivariance_ops/equivariance/indexed_linear.hh +36 -0
  10. cuequivariance_ops/equivariance/run_fmha.h +192 -0
  11. cuequivariance_ops/equivariance/run_fmha_cudafree.h +77 -0
  12. cuequivariance_ops/equivariance/segmented_transpose.cuh +40 -0
  13. cuequivariance_ops/equivariance/tensor_product_uniform_1d_jit.hh +38 -0
  14. cuequivariance_ops/lib/libcue_ops.so +0 -0
  15. cuequivariance_ops/sleep.hh +18 -0
  16. cuequivariance_ops/triton/__init__.py +66 -0
  17. cuequivariance_ops/triton/cache/fused_sigmoid_gated_dual_gemm_backward_pregemm_kernel_wrapper.10.0.json +37192 -0
  18. cuequivariance_ops/triton/cache/fused_sigmoid_gated_dual_gemm_backward_pregemm_kernel_wrapper.8.0.json +37133 -0
  19. cuequivariance_ops/triton/cache/fused_sigmoid_gated_dual_gemm_backward_pregemm_kernel_wrapper.8.6.json +37133 -0
  20. cuequivariance_ops/triton/cache/fused_sigmoid_gated_dual_gemm_backward_pregemm_kernel_wrapper.8.9.json +37132 -0
  21. cuequivariance_ops/triton/cache/fused_sigmoid_gated_dual_gemm_backward_pregemm_kernel_wrapper.9.0.json +74262 -0
  22. cuequivariance_ops/triton/cache/fused_sigmoid_gated_dual_gemm_forward_kernel_wrapper.10.0.json +48482 -0
  23. cuequivariance_ops/triton/cache/fused_sigmoid_gated_dual_gemm_forward_kernel_wrapper.8.0.json +55693 -0
  24. cuequivariance_ops/triton/cache/fused_sigmoid_gated_dual_gemm_forward_kernel_wrapper.8.6.json +55692 -0
  25. cuequivariance_ops/triton/cache/fused_sigmoid_gated_dual_gemm_forward_kernel_wrapper.8.9.json +55693 -0
  26. cuequivariance_ops/triton/cache/fused_sigmoid_gated_dual_gemm_forward_kernel_wrapper.9.0.json +111382 -0
  27. cuequivariance_ops/triton/cache_manager.py +259 -0
  28. cuequivariance_ops/triton/fused_layer_norm_triton.py +518 -0
  29. cuequivariance_ops/triton/gated_gemm_triton.py +380 -0
  30. cuequivariance_ops/triton/pair_bias.py +324 -0
  31. cuequivariance_ops/triton/tuning_decorator.py +177 -0
  32. cuequivariance_ops/triton/utils.py +28 -0
  33. cuequivariance_ops_cu12-0.6.0.dist-info/METADATA +182 -0
  34. cuequivariance_ops_cu12-0.6.0.dist-info/RECORD +37 -0
  35. cuequivariance_ops_cu12-0.6.0.dist-info/WHEEL +6 -0
  36. cuequivariance_ops_cu12-0.6.0.dist-info/licenses/LICENSE +142 -0
  37. cuequivariance_ops_cu12-0.6.0.dist-info/licenses/Third_party_attr.txt +24 -0
cuequivariance_ops/triton/cache_manager.py
@@ -0,0 +1,259 @@
+ # SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ # SPDX-License-Identifier: LicenseRef-NvidiaProprietary
+ #
+ # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
+ # property and proprietary rights in and to this material, related
+ # documentation and any modifications thereto. Any use, reproduction,
+ # disclosure or distribution of this material and related documentation
+ # without an express license agreement from NVIDIA CORPORATION or
+ # its affiliates is strictly prohibited.
+
+ import hashlib
+ import json
+ import logging
+ import math
+ import os
+ from multiprocessing import Lock
+ from pathlib import Path
+ from typing import Any
+
+ import pynvml
+ from platformdirs import user_cache_dir
+
+ # Configure logging
+ logger = logging.getLogger(__name__)
+
+ FILE_LOCK = Lock()
+
+
+ def get_triton_tuning_mode():
+     cueq_at = os.getenv("CUEQ_TRITON_TUNING")
+     if cueq_at is not None and cueq_at not in ["AOT", "ONDEMAND"]:
+         logger.error(f"CUEQ_TRITON_TUNING setting not recognized: {cueq_at}.\n")
+     return cueq_at
+
+
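Note that get_triton_tuning_mode() re-reads the environment on every call and returns the raw value even when it is unrecognized (the error is only logged). A minimal sketch of driving it:

    import os
    os.environ["CUEQ_TRITON_TUNING"] = "ONDEMAND"   # or "AOT"
    assert get_triton_tuning_mode() == "ONDEMAND"
    os.environ["CUEQ_TRITON_TUNING"] = "bogus"      # logged as an error...
    assert get_triton_tuning_mode() == "bogus"      # ...but still returned to the caller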
+ def is_docker():
+     cgroup = Path("/proc/self/cgroup")
+     return Path("/.dockerenv").is_file() or (
+         cgroup.is_file() and "docker" in cgroup.read_text()
+     )
+
+
+ def overridden_cache_dir():
+     return os.getenv("CUEQ_TRITON_CACHE_DIR")
+
+
+ def get_triton_cache_dir() -> Path:
+     cache_dir = overridden_cache_dir()
+     if cache_dir is None:
+         cache_dir = user_cache_dir(appname="cuequivariance-triton", ensure_exists=False)
+     cache_dir = Path(cache_dir)
+     if cache_dir.exists():
+         return cache_dir
+     cache_dir.mkdir(parents=True, exist_ok=True)
+     return cache_dir
+
+
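Cache directory resolution is two-step: an explicit CUEQ_TRITON_CACHE_DIR override wins; otherwise a per-user directory from platformdirs is used and created on first access. A short sketch (the override path is made up):

    import os
    os.environ["CUEQ_TRITON_CACHE_DIR"] = "/tmp/cueq-triton-cache"  # hypothetical path
    cache_dir = get_triton_cache_dir()  # Path("/tmp/cueq-triton-cache"), created if missing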
+ def get_gpu_information():
+     pynvml.nvmlInit()
+     # Note: non-uniform multi-GPU setups are not supported
+     handle = pynvml.nvmlDeviceGetHandleByIndex(0)
+     name = pynvml.nvmlDeviceGetName(handle)
+     # pci_info = pynvml.nvmlDeviceGetPciInfo(handle)
+     # device_id = pci_info.pciDeviceId
+     # sub_device_id = pci_info.pciSubSystemId
+     power_limit = pynvml.nvmlDeviceGetPowerManagementLimit(handle)
+     max_clock_rate = pynvml.nvmlDeviceGetMaxClockInfo(
+         handle, pynvml.NVML_CLOCK_GRAPHICS
+     )
+     mem_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
+     gpu_core_count = pynvml.nvmlDeviceGetNumGpuCores(handle)
+     major, minor = pynvml.nvmlDeviceGetCudaComputeCapability(handle)
+
+     pynvml.nvmlShutdown()
+     return {
+         "name": name,
+         # "device_id": device_id,
+         # "sub_device_id": sub_device_id,
+         "total_memory": math.ceil(mem_info.total / (1024**3)),
+         "multi_processor_count": gpu_core_count // 128,
+         "power_limit": power_limit // 1000,
+         "clock_rate": max_clock_rate,
+         "major": major,
+         "minor": minor,
+     }
+
+
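For reference, the returned description for a hypothetical 80 GB, compute-capability-8.0 board might look like the dict below (all numbers illustrative, not measured). Note that multi_processor_count is inferred as gpu_core_count // 128, i.e. it assumes 128 CUDA cores per SM:

    {
        "name": "NVIDIA A100-SXM4-80GB",  # illustrative
        "total_memory": 80,               # GiB, rounded up
        "multi_processor_count": 54,      # 6912 cores // 128
        "power_limit": 400,               # W (NVML reports mW)
        "clock_rate": 1410,               # max graphics clock, MHz
        "major": 8,
        "minor": 0,
    }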
+ def gpu_information_to_key(information: dict) -> str:
+     information.pop("name", None)
+     key_string = "_".join(f"{value}" for value in information.values()).replace(
+         " ", "_"
+     )
+     hash_object = hashlib.sha256(key_string.encode())
+     hash_str = hash_object.hexdigest()
+     return hash_str
+
+
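Two boards whose non-name fields agree therefore collapse to the same cache key. Note also that the function pops "name" from the dict it is handed, so the caller's copy (here self.gpu_information) is mutated. The equivalent computation, spelled out with illustrative values:

    import hashlib
    info = {"total_memory": 80, "multi_processor_count": 54, "power_limit": 400,
            "clock_rate": 1410, "major": 8, "minor": 0}   # "name" already removed
    key_string = "_".join(str(v) for v in info.values()).replace(" ", "_")
    gpu_key = hashlib.sha256(key_string.encode()).hexdigest()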
+ def load_json(json_file):
+     with FILE_LOCK:
+         with open(json_file, "rb") as f:
+             fn_cache = json.load(f)
+     return fn_cache
+
+
+ class CacheManager:
+     """Singleton managing the cache"""
+
+     def __init__(self):
+         self.gpu_cache = {}
+         self.gpu_information = get_gpu_information()
+         self.gpu_key = gpu_information_to_key(self.gpu_information)
+         self.site_json_path = str(os.path.join(os.path.dirname(__file__), "cache"))
+         self.json_path = str(get_triton_cache_dir())
+         self.dirty = {}
+
+         if os.getenv("CUEQ_TRITON_IGNORE_EXISTING_CACHE") == "1":
+             logger.warning(
+                 "\n!!!!!! CUEQ_TRITON_IGNORE_EXISTING_CACHE is ON - previously saved settings will be ignored !!!!!!\n"
+                 f"CUEQ_TRITON_TUNING is set to {self.aot_mode}\n"
+                 f"The tuning changes will be written to {self.json_path}"
+             )
+
+         if (
+             self.aot_mode is not None
+             and is_docker()
+             and os.getenv("HOME") == "/root"
+             and not overridden_cache_dir()
+         ):
+             logger.warning(
+                 f"\n!!!!!! CUEQ_TRITON_TUNING is set to {self.aot_mode} and you are running as root in a Docker container. !!!!!!\n"
+                 f"The tuning changes will be written to {self.json_path}\n"
+                 "Please remember to commit the container - otherwise any tuning changes will be lost on container restart."
+             )
+
+     # define aot_mode as a property to allow the environment variable to change during runtime
+     @property
+     def aot_mode(self):
+         return get_triton_tuning_mode()
+
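Because aot_mode delegates to get_triton_tuning_mode() on each access, flipping the environment variable mid-process is picked up without rebuilding the manager. A minimal sketch:

    cm = CacheManager()                       # sketch; normally use get_cache_manager()
    os.environ["CUEQ_TRITON_TUNING"] = "AOT"
    assert cm.aot_mode == "AOT"               # re-read from the environment every time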
+     def load_cache(self, fn_key: str) -> dict:
+         # load the json file and store it in the cache-dict;
+         # if the file does not exist, create an empty dict for the specified function
+         fn_cache = {}
+         gpu_cache = {}
+         best_key = None
+
+         major, minor = self.gpu_information["major"], self.gpu_information["minor"]
+         basename = f"{fn_key}.{major}.{minor}.json"
+         json_file = f"{self.json_path}/{basename}"
+
+         def result(self, gpu_cache):
+             # empty cache or fuzzy match: update for a possible save
+             if best_key or not gpu_cache:
+                 gpu_cache["gpu_information"] = self.gpu_information
+             self.gpu_cache[fn_key] = gpu_cache
+             return gpu_cache
+
+         if os.getenv("CUEQ_TRITON_IGNORE_EXISTING_CACHE"):
+             return result(self, gpu_cache)
+
+         try:
+             fn_cache = load_json(json_file)
+         except Exception as e0:
+             site_json_file = f"{self.site_json_path}/{basename}"
+             try:
+                 fn_cache = load_json(site_json_file)
+             except Exception as e:
+                 logger.warning(
+                     f"Error reading system-wide triton tuning cache file: {site_json_file}\n{e}\n"
+                     f"Error reading user's triton tuning cache file {json_file}:\n{e0}"
+                 )
+         if fn_cache:
+             gpu_cache = fn_cache.get(self.gpu_key)
+             if gpu_cache is None:
+                 # do a fuzzy match of the GPU configuration:
+                 def within_10_percent(a, b, key):
+                     a = int(a[key])
+                     b = int(b[key])
+                     # |a - b| / (a + b) < 0.2 admits values within roughly 20% of their mean
+                     return abs(a - b) / (a + b) < 0.2
+
+                 def full_match(a, b):
+                     # both memory and clock rate match exactly
+                     return (
+                         a["total_memory"] == b["total_memory"]
+                         and a["clock_rate"] == b["clock_rate"]
+                     )
+
+                 def partial_match(a, b):
+                     # either memory or clock rate is close
+                     return within_10_percent(a, b, "total_memory") or within_10_percent(
+                         a, b, "clock_rate"
+                     )
+
+                 for key in fn_cache:
+                     conf = fn_cache[key].get("gpu_information")
+                     if conf:
+                         if full_match(conf, self.gpu_information):
+                             best_key = key
+                             break
+                         elif partial_match(conf, self.gpu_information):
+                             best_key = key
+                 if best_key is None:
+                     # just pick the first entry there
+                     best_key = next(iter(fn_cache))
+                 gpu_cache = fn_cache[best_key]
+
+         return result(self, gpu_cache)
+
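Lookup order in load_cache is: the per-user JSON file, then the site-wide copy shipped under cuequivariance_ops/triton/cache; within a file, an exact hit on the hashed GPU key, then a fuzzy match on total memory and clock rate, and finally the first entry as a last resort. The fuzzy predicate, extracted here for illustration with made-up clock values:

    def close(a, b):  # mirrors within_10_percent and its 0.2 threshold
        return abs(a - b) / (a + b) < 0.2

    close(80, 80)      # True: memory matches exactly
    close(1410, 1695)  # True: 285 / 3105 ≈ 0.09 < 0.2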
+     def save_cache(self, fn_key: str) -> None:
+         # save cache-dict to json file
+         major, minor = self.gpu_information["major"], self.gpu_information["minor"]
+         basename = f"{fn_key}.{major}.{minor}.json"
+         json_file = os.path.join(self.json_path, basename)
+
+         # Load existing data from the file if it exists
+         if os.path.exists(json_file):
+             with FILE_LOCK, open(json_file, "rb") as f:
+                 existing_data = json.load(f)
+         else:
+             existing_data = {}
+         # Update the entry for our GPU key with our data
+         existing_data.setdefault(self.gpu_key, {}).update(self.gpu_cache[fn_key])
+         self.gpu_cache[fn_key] = existing_data[self.gpu_key]
+         merged_data = existing_data
+         temp_file = f"{json_file}.{os.getpid()}.tmp"
+         try:
+             # Save the merged data back to the file, atomically via a temp file
+             with FILE_LOCK:
+                 with open(temp_file, "w") as f:
+                     json.dump(merged_data, f, indent=4)
+                 os.replace(temp_file, json_file)
+         except Exception as e:
+             logger.warning(f"Failed to write autotune cache: {e}")
+
+         # Clear the dirty flag
+         self.dirty.pop(fn_key, None)
+
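save_cache merges rather than overwrites: entries for other GPU keys in the file are preserved, and the write goes through a pid-unique temp file plus os.replace, so concurrent readers see either the old or the new file, never a partial one. The on-disk layout is keyed first by GPU fingerprint, then by input key; a hypothetical, abridged file (fingerprint shortened, tuning payload invented) would look like:

    {
        "3f2c9a...": {  # sha256 from gpu_information_to_key
            "gpu_information": {"total_memory": 80, "clock_rate": 1410, "major": 8, "minor": 0},
            "some_inp_key": {"BLOCK": 128, "num_warps": 8}
        }
    }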
+     def get(self, fn_key: str, inp_key: str) -> Any:
+         # get value from cache
+         # if necessary, load json first
+         gpu_cache = self.gpu_cache.get(fn_key)
+         if gpu_cache is None:
+             gpu_cache = self.load_cache(fn_key)
+         # check if fn_key and inp_key exist in cache
+         return gpu_cache.get(inp_key)
+
+     def set(self, fn_key: str, inp_key: str, value: Any) -> None:
+         # write value to cache-dict and mark it dirty for a later save
+         self.gpu_cache[fn_key][inp_key] = value
+         self.dirty[fn_key] = 1
+
+
+ cache_manager = CacheManager()
+
+
+ def get_cache_manager():
+     return cache_manager
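Putting it together, a tuning decorator would typically use the singleton as follows (the fn_key/inp_key strings and the config payload are made up for illustration):

    cm = get_cache_manager()
    cfg = cm.get("my_kernel", "f32_n4096")     # loads the JSON on first access
    if cfg is None:
        cfg = {"BLOCK": 128, "num_warps": 8}   # e.g. the result of autotuning
        cm.set("my_kernel", "f32_n4096", cfg)  # marks the entry dirty
        cm.save_cache("my_kernel")             # merge with disk + atomic write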