triton-windows 3.3.1.post19__cp311-cp311-win_amd64.whl → 3.3.1.post21__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of triton-windows might be problematic.

Files changed (108)
  1. triton/_C/libtriton.pyd +0 -0
  2. triton/backends/amd/driver.py +6 -1
  3. triton/backends/nvidia/compiler.py +1 -3
  4. triton/backends/nvidia/driver.py +7 -3
  5. triton/runtime/autotuner.py +2 -2
  6. triton/runtime/build.py +5 -5
  7. triton/windows_utils.py +11 -4
  8. {triton_windows-3.3.1.post19.dist-info → triton_windows-3.3.1.post21.dist-info}/METADATA +1 -1
  9. {triton_windows-3.3.1.post19.dist-info → triton_windows-3.3.1.post21.dist-info}/RECORD +11 -108
  10. triton/backends/amd/include/hip/amd_detail/amd_channel_descriptor.h +0 -358
  11. triton/backends/amd/include/hip/amd_detail/amd_device_functions.h +0 -1010
  12. triton/backends/amd/include/hip/amd_detail/amd_hip_atomic.h +0 -1638
  13. triton/backends/amd/include/hip/amd_detail/amd_hip_bf16.h +0 -1814
  14. triton/backends/amd/include/hip/amd_detail/amd_hip_bfloat16.h +0 -293
  15. triton/backends/amd/include/hip/amd_detail/amd_hip_common.h +0 -32
  16. triton/backends/amd/include/hip/amd_detail/amd_hip_complex.h +0 -174
  17. triton/backends/amd/include/hip/amd_detail/amd_hip_cooperative_groups.h +0 -835
  18. triton/backends/amd/include/hip/amd_detail/amd_hip_fp16.h +0 -1809
  19. triton/backends/amd/include/hip/amd_detail/amd_hip_fp8.h +0 -1391
  20. triton/backends/amd/include/hip/amd_detail/amd_hip_gl_interop.h +0 -108
  21. triton/backends/amd/include/hip/amd_detail/amd_hip_math_constants.h +0 -124
  22. triton/backends/amd/include/hip/amd_detail/amd_hip_runtime.h +0 -405
  23. triton/backends/amd/include/hip/amd_detail/amd_hip_runtime_pt_api.h +0 -196
  24. triton/backends/amd/include/hip/amd_detail/amd_hip_unsafe_atomics.h +0 -565
  25. triton/backends/amd/include/hip/amd_detail/amd_hip_vector_types.h +0 -2226
  26. triton/backends/amd/include/hip/amd_detail/amd_math_functions.h +0 -104
  27. triton/backends/amd/include/hip/amd_detail/amd_surface_functions.h +0 -244
  28. triton/backends/amd/include/hip/amd_detail/amd_warp_functions.h +0 -538
  29. triton/backends/amd/include/hip/amd_detail/amd_warp_sync_functions.h +0 -288
  30. triton/backends/amd/include/hip/amd_detail/concepts.hpp +0 -30
  31. triton/backends/amd/include/hip/amd_detail/device_library_decls.h +0 -133
  32. triton/backends/amd/include/hip/amd_detail/functional_grid_launch.hpp +0 -218
  33. triton/backends/amd/include/hip/amd_detail/grid_launch.h +0 -67
  34. triton/backends/amd/include/hip/amd_detail/grid_launch.hpp +0 -50
  35. triton/backends/amd/include/hip/amd_detail/grid_launch_GGL.hpp +0 -26
  36. triton/backends/amd/include/hip/amd_detail/helpers.hpp +0 -137
  37. triton/backends/amd/include/hip/amd_detail/hip_api_trace.hpp +0 -1446
  38. triton/backends/amd/include/hip/amd_detail/hip_assert.h +0 -101
  39. triton/backends/amd/include/hip/amd_detail/hip_cooperative_groups_helper.h +0 -242
  40. triton/backends/amd/include/hip/amd_detail/hip_fp16_gcc.h +0 -254
  41. triton/backends/amd/include/hip/amd_detail/hip_fp16_math_fwd.h +0 -96
  42. triton/backends/amd/include/hip/amd_detail/hip_ldg.h +0 -100
  43. triton/backends/amd/include/hip/amd_detail/hip_prof_str.h +0 -10570
  44. triton/backends/amd/include/hip/amd_detail/hip_runtime_prof.h +0 -78
  45. triton/backends/amd/include/hip/amd_detail/host_defines.h +0 -184
  46. triton/backends/amd/include/hip/amd_detail/hsa_helpers.hpp +0 -102
  47. triton/backends/amd/include/hip/amd_detail/macro_based_grid_launch.hpp +0 -798
  48. triton/backends/amd/include/hip/amd_detail/math_fwd.h +0 -698
  49. triton/backends/amd/include/hip/amd_detail/ockl_image.h +0 -177
  50. triton/backends/amd/include/hip/amd_detail/program_state.hpp +0 -107
  51. triton/backends/amd/include/hip/amd_detail/texture_fetch_functions.h +0 -491
  52. triton/backends/amd/include/hip/amd_detail/texture_indirect_functions.h +0 -478
  53. triton/backends/amd/include/hip/channel_descriptor.h +0 -39
  54. triton/backends/amd/include/hip/device_functions.h +0 -38
  55. triton/backends/amd/include/hip/driver_types.h +0 -468
  56. triton/backends/amd/include/hip/hip_bf16.h +0 -36
  57. triton/backends/amd/include/hip/hip_bfloat16.h +0 -44
  58. triton/backends/amd/include/hip/hip_common.h +0 -100
  59. triton/backends/amd/include/hip/hip_complex.h +0 -38
  60. triton/backends/amd/include/hip/hip_cooperative_groups.h +0 -46
  61. triton/backends/amd/include/hip/hip_deprecated.h +0 -95
  62. triton/backends/amd/include/hip/hip_ext.h +0 -161
  63. triton/backends/amd/include/hip/hip_fp16.h +0 -36
  64. triton/backends/amd/include/hip/hip_fp8.h +0 -33
  65. triton/backends/amd/include/hip/hip_gl_interop.h +0 -32
  66. triton/backends/amd/include/hip/hip_hcc.h +0 -24
  67. triton/backends/amd/include/hip/hip_math_constants.h +0 -36
  68. triton/backends/amd/include/hip/hip_profile.h +0 -27
  69. triton/backends/amd/include/hip/hip_runtime.h +0 -75
  70. triton/backends/amd/include/hip/hip_runtime_api.h +0 -9261
  71. triton/backends/amd/include/hip/hip_texture_types.h +0 -29
  72. triton/backends/amd/include/hip/hip_vector_types.h +0 -41
  73. triton/backends/amd/include/hip/hip_version.h +0 -17
  74. triton/backends/amd/include/hip/hiprtc.h +0 -421
  75. triton/backends/amd/include/hip/library_types.h +0 -78
  76. triton/backends/amd/include/hip/math_functions.h +0 -42
  77. triton/backends/amd/include/hip/surface_types.h +0 -63
  78. triton/backends/amd/include/hip/texture_types.h +0 -194
  79. triton/backends/amd/include/hsa/Brig.h +0 -1131
  80. triton/backends/amd/include/hsa/amd_hsa_common.h +0 -91
  81. triton/backends/amd/include/hsa/amd_hsa_elf.h +0 -462
  82. triton/backends/amd/include/hsa/amd_hsa_kernel_code.h +0 -269
  83. triton/backends/amd/include/hsa/amd_hsa_queue.h +0 -109
  84. triton/backends/amd/include/hsa/amd_hsa_signal.h +0 -80
  85. triton/backends/amd/include/hsa/hsa.h +0 -5738
  86. triton/backends/amd/include/hsa/hsa_amd_tool.h +0 -91
  87. triton/backends/amd/include/hsa/hsa_api_trace.h +0 -579
  88. triton/backends/amd/include/hsa/hsa_api_trace_version.h +0 -68
  89. triton/backends/amd/include/hsa/hsa_ext_amd.h +0 -3146
  90. triton/backends/amd/include/hsa/hsa_ext_finalize.h +0 -531
  91. triton/backends/amd/include/hsa/hsa_ext_image.h +0 -1454
  92. triton/backends/amd/include/hsa/hsa_ven_amd_aqlprofile.h +0 -488
  93. triton/backends/amd/include/hsa/hsa_ven_amd_loader.h +0 -667
  94. triton/backends/amd/include/hsa/hsa_ven_amd_pc_sampling.h +0 -416
  95. triton/backends/amd/include/roctracer/ext/prof_protocol.h +0 -107
  96. triton/backends/amd/include/roctracer/hip_ostream_ops.h +0 -4515
  97. triton/backends/amd/include/roctracer/hsa_ostream_ops.h +0 -1727
  98. triton/backends/amd/include/roctracer/hsa_prof_str.h +0 -3059
  99. triton/backends/amd/include/roctracer/roctracer.h +0 -779
  100. triton/backends/amd/include/roctracer/roctracer_ext.h +0 -81
  101. triton/backends/amd/include/roctracer/roctracer_hcc.h +0 -24
  102. triton/backends/amd/include/roctracer/roctracer_hip.h +0 -37
  103. triton/backends/amd/include/roctracer/roctracer_hsa.h +0 -112
  104. triton/backends/amd/include/roctracer/roctracer_plugin.h +0 -137
  105. triton/backends/amd/include/roctracer/roctracer_roctx.h +0 -67
  106. triton/backends/amd/include/roctracer/roctx.h +0 -229
  107. {triton_windows-3.3.1.post19.dist-info → triton_windows-3.3.1.post21.dist-info}/WHEEL +0 -0
  108. {triton_windows-3.3.1.post19.dist-info → triton_windows-3.3.1.post21.dist-info}/top_level.txt +0 -0
triton/backends/amd/include/hip/amd_detail/amd_hip_unsafe_atomics.h (deleted)
@@ -1,565 +0,0 @@
- /*
- Copyright (c) 2021 - 2023 Advanced Micro Devices, Inc. All rights reserved.
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in
- all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- THE SOFTWARE.
- */
-
- #pragma once
-
- #ifdef __cplusplus
-
- /**
- * @brief Unsafe floating point rmw atomic add.
- *
- * Performs a relaxed read-modify-write floating point atomic add with
- * device memory scope. Original value at \p addr is returned and
- * the value of \p addr is updated to have the original value plus \p value
- *
- * @note This operation currently only performs different operations for
- * the gfx90a target. Other devices continue to use safe atomics.
- *
- * It can be used to generate code that uses fast hardware floating point atomic
- * operations which may handle rounding and subnormal values differently than
- * non-atomic floating point operations.
- *
- * The operation is not always safe and can have undefined behavior unless
- * following condition are met:
- *
- * - \p addr is at least 4 bytes aligned
- * - If \p addr is a global segment address, it is in a coarse grain allocation.
- * Passing in global segment addresses in fine grain allocations will result in
- * undefined behavior and is not supported.
- *
- * @param [in,out] addr Pointer to value to be increment by \p value.
- * @param [in] value Value by \p addr is to be incremented.
- * @return Original value contained in \p addr.
- */
- __device__ inline float unsafeAtomicAdd(float* addr, float value) {
- #if defined(__gfx90a__) && \
- __has_builtin(__builtin_amdgcn_is_shared) && \
- __has_builtin(__builtin_amdgcn_is_private) && \
- __has_builtin(__builtin_amdgcn_ds_atomic_fadd_f32) && \
- __has_builtin(__builtin_amdgcn_global_atomic_fadd_f32)
- if (__builtin_amdgcn_is_shared(
- (const __attribute__((address_space(0))) void*)addr))
- return __builtin_amdgcn_ds_atomic_fadd_f32(addr, value);
- else if (__builtin_amdgcn_is_private(
- (const __attribute__((address_space(0))) void*)addr)) {
- float temp = *addr;
- *addr = temp + value;
- return temp;
- }
- else
- return __builtin_amdgcn_global_atomic_fadd_f32(addr, value);
- #elif __has_builtin(__hip_atomic_fetch_add)
- return __hip_atomic_fetch_add(addr, value, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
- #else
- return __atomic_fetch_add(addr, value, __ATOMIC_RELAXED);
- #endif
- }
-
- /**
- * @brief Unsafe floating point rmw atomic max.
- *
- * Performs a relaxed read-modify-write floating point atomic max with
- * device memory scope. The original value at \p addr is returned and
- * the value at \p addr is replaced by \p val if greater.
- *
- * @note This operation is currently identical to that performed by
- * atomicMax and is included for completeness.
- *
- * @param [in,out] addr Pointer to value to be updated
- * @param [in] val Value used to update the value at \p addr.
- * @return Original value contained in \p addr.
- */
- __device__ inline float unsafeAtomicMax(float* addr, float val) {
- #if __has_builtin(__hip_atomic_load) && \
- __has_builtin(__hip_atomic_compare_exchange_strong)
- float value = __hip_atomic_load(addr, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
- bool done = false;
- while (!done && value < val) {
- done = __hip_atomic_compare_exchange_strong(addr, &value, val,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
- }
- return value;
- #else
- unsigned int *uaddr = (unsigned int *)addr;
- unsigned int value = __atomic_load_n(uaddr, __ATOMIC_RELAXED);
- bool done = false;
- while (!done && __uint_as_float(value) < val) {
- done = __atomic_compare_exchange_n(uaddr, &value, __float_as_uint(val), false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
- }
- return __uint_as_float(value);
- #endif
- }
-
- /**
- * @brief Unsafe floating point rmw atomic min.
- *
- * Performs a relaxed read-modify-write floating point atomic min with
- * device memory scope. The original value at \p addr is returned and
- * the value at \p addr is replaced by \p val if lesser.
- *
- * @note This operation is currently identical to that performed by
- * atomicMin and is included for completeness.
- *
- * @param [in,out] addr Pointer to value to be updated
- * @param [in] val Value used to update the value at \p addr.
- * @return Original value contained in \p addr.
- */
- __device__ inline float unsafeAtomicMin(float* addr, float val) {
- #if __has_builtin(__hip_atomic_load) && \
- __has_builtin(__hip_atomic_compare_exchange_strong)
- float value = __hip_atomic_load(addr, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
- bool done = false;
- while (!done && value > val) {
- done = __hip_atomic_compare_exchange_strong(addr, &value, val,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
- }
- return value;
- #else
- unsigned int *uaddr = (unsigned int *)addr;
- unsigned int value = __atomic_load_n(uaddr, __ATOMIC_RELAXED);
- bool done = false;
- while (!done && __uint_as_float(value) > val) {
- done = __atomic_compare_exchange_n(uaddr, &value, __float_as_uint(val), false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
- }
- return __uint_as_float(value);
- #endif
- }
-
- /**
- * @brief Unsafe double precision rmw atomic add.
- *
- * Performs a relaxed read-modify-write double precision atomic add with
- * device memory scope. Original value at \p addr is returned and
- * the value of \p addr is updated to have the original value plus \p value
- *
- * @note This operation currently only performs different operations for
- * the gfx90a target. Other devices continue to use safe atomics.
- *
- * It can be used to generate code that uses fast hardware floating point atomic
- * operations which may handle rounding and subnormal values differently than
- * non-atomic floating point operations.
- *
- * The operation is not always safe and can have undefined behavior unless
- * following condition are met:
- *
- * - \p addr is at least 8 byte aligned
- * - If \p addr is a global segment address, it is in a coarse grain allocation.
- * Passing in global segment addresses in fine grain allocations will result in
- * undefined behavior and are not supported.
- *
- * @param [in,out] addr Pointer to value to be updated.
- * @param [in] value Value by \p addr is to be incremented.
- * @return Original value contained in \p addr.
- */
- __device__ inline double unsafeAtomicAdd(double* addr, double value) {
- #if defined(__gfx90a__) && __has_builtin(__builtin_amdgcn_flat_atomic_fadd_f64)
- return __builtin_amdgcn_flat_atomic_fadd_f64(addr, value);
- #elif defined (__hip_atomic_fetch_add)
- return __hip_atomic_fetch_add(addr, value, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
- #else
- return __atomic_fetch_add(addr, value, __ATOMIC_RELAXED);
- #endif
- }
-
- /**
- * @brief Unsafe double precision rmw atomic max.
- *
- * Performs a relaxed read-modify-write double precision atomic max with
- * device memory scope. Original value at \p addr is returned and
- * the value of \p addr is updated with \p val if greater.
- *
- * @note This operation currently only performs different operations for
- * the gfx90a target. Other devices continue to use safe atomics.
- *
- * It can be used to generate code that uses fast hardware floating point atomic
- * operations which may handle rounding and subnormal values differently than
- * non-atomic floating point operations.
- *
- * The operation is not always safe and can have undefined behavior unless
- * following condition are met:
- *
- * - \p addr is at least 8 byte aligned
- * - If \p addr is a global segment address, it is in a coarse grain allocation.
- * Passing in global segment addresses in fine grain allocations will result in
- * undefined behavior and are not supported.
- *
- * @param [in,out] addr Pointer to value to be updated.
- * @param [in] val Value used to updated the contents at \p addr
- * @return Original value contained at \p addr.
- */
- __device__ inline double unsafeAtomicMax(double* addr, double val) {
- #if (defined(__gfx90a__) || defined(__gfx940__) || defined(__gfx941__) || defined(__gfx942__)) && \
- __has_builtin(__builtin_amdgcn_flat_atomic_fmax_f64)
- return __builtin_amdgcn_flat_atomic_fmax_f64(addr, val);
- #else
- #if __has_builtin(__hip_atomic_load) && \
- __has_builtin(__hip_atomic_compare_exchange_strong)
- double value = __hip_atomic_load(addr, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
- bool done = false;
- while (!done && value < val) {
- done = __hip_atomic_compare_exchange_strong(addr, &value, val,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
- }
- return value;
- #else
- unsigned long long *uaddr = (unsigned long long *)addr;
- unsigned long long value = __atomic_load_n(uaddr, __ATOMIC_RELAXED);
- bool done = false;
- while (!done && __longlong_as_double(value) < val) {
- done = __atomic_compare_exchange_n(uaddr, &value, __double_as_longlong(val), false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
- }
- return __longlong_as_double(value);
- #endif
- #endif
- }
-
- /**
- * @brief Unsafe double precision rmw atomic min.
- *
- * Performs a relaxed read-modify-write double precision atomic min with
- * device memory scope. Original value at \p addr is returned and
- * the value of \p addr is updated with \p val if lesser.
- *
- * @note This operation currently only performs different operations for
- * the gfx90a target. Other devices continue to use safe atomics.
- *
- * It can be used to generate code that uses fast hardware floating point atomic
- * operations which may handle rounding and subnormal values differently than
- * non-atomic floating point operations.
- *
- * The operation is not always safe and can have undefined behavior unless
- * following condition are met:
- *
- * - \p addr is at least 8 byte aligned
- * - If \p addr is a global segment address, it is in a coarse grain allocation.
- * Passing in global segment addresses in fine grain allocations will result in
- * undefined behavior and are not supported.
- *
- * @param [in,out] addr Pointer to value to be updated.
- * @param [in] val Value used to updated the contents at \p addr
- * @return Original value contained at \p addr.
- */
- __device__ inline double unsafeAtomicMin(double* addr, double val) {
- #if (defined(__gfx90a__) || defined(__gfx940__) || defined(__gfx941__) || defined(__gfx942__)) && \
- __has_builtin(__builtin_amdgcn_flat_atomic_fmin_f64)
- return __builtin_amdgcn_flat_atomic_fmin_f64(addr, val);
- #else
- #if __has_builtin(__hip_atomic_load) && \
- __has_builtin(__hip_atomic_compare_exchange_strong)
- double value = __hip_atomic_load(addr, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
- bool done = false;
- while (!done && value > val) {
- done = __hip_atomic_compare_exchange_strong(addr, &value, val,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
- }
- return value;
- #else
- unsigned long long *uaddr = (unsigned long long *)addr;
- unsigned long long value = __atomic_load_n(uaddr, __ATOMIC_RELAXED);
- bool done = false;
- while (!done && __longlong_as_double(value) > val) {
- done = __atomic_compare_exchange_n(uaddr, &value, __double_as_longlong(val), false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
- }
- return __longlong_as_double(value);
- #endif
- #endif
- }
-
- /**
- * @brief Safe floating point rmw atomic add.
- *
- * Performs a relaxed read-modify-write floating point atomic add with
- * device memory scope. Original value at \p addr is returned and
- * the value of \p addr is updated to have the original value plus \p value
- *
- * @note This operation ensures that, on all targets, we produce safe atomics.
- * This will be the case even when -munsafe-fp-atomics is passed into the compiler.
- *
- * @param [in,out] addr Pointer to value to be increment by \p value.
- * @param [in] value Value by \p addr is to be incremented.
- * @return Original value contained in \p addr.
- */
- __device__ inline float safeAtomicAdd(float* addr, float value) {
- #if defined(__gfx908__) || defined(__gfx941__) \
- || ((defined(__gfx90a__) || defined(__gfx940__) || defined(__gfx942__)) \
- && !__has_builtin(__hip_atomic_fetch_add))
- // On gfx908, we can generate unsafe FP32 atomic add that does not follow all
- // IEEE rules when -munsafe-fp-atomics is passed. Do a CAS loop emulation instead.
- // On gfx941, we can generate unsafe FP32 atomic add that may not always happen atomically,
- // so we need to force a CAS loop emulation to ensure safety.
- // On gfx90a, gfx940 and gfx942 if we do not have the __hip_atomic_fetch_add builtin, we
- // need to force a CAS loop here.
- float old_val;
- #if __has_builtin(__hip_atomic_load)
- old_val = __hip_atomic_load(addr, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
- #else // !__has_builtin(__hip_atomic_load)
- old_val = __uint_as_float(__atomic_load_n(reinterpret_cast<unsigned int*>(addr), __ATOMIC_RELAXED));
- #endif // __has_builtin(__hip_atomic_load)
- float expected, temp;
- do {
- temp = expected = old_val;
- #if __has_builtin(__hip_atomic_compare_exchange_strong)
- __hip_atomic_compare_exchange_strong(addr, &expected, old_val + value, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
- #else // !__has_builtin(__hip_atomic_compare_exchange_strong)
- __atomic_compare_exchange_n(addr, &expected, old_val + value, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
- #endif // __has_builtin(__hip_atomic_compare_exchange_strong)
- old_val = expected;
- } while (__float_as_uint(temp) != __float_as_uint(old_val));
- return old_val;
- #elif defined(__gfx90a__)
- // On gfx90a, with the __hip_atomic_fetch_add builtin, relaxed system-scope
- // atomics will produce safe CAS loops, but are otherwise not different than
- // agent-scope atomics. This logic is only applicable for gfx90a, and should
- // not be assumed on other architectures.
- return __hip_atomic_fetch_add(addr, value, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
- #elif __has_builtin(__hip_atomic_fetch_add)
- return __hip_atomic_fetch_add(addr, value, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
- #else
- return __atomic_fetch_add(addr, value, __ATOMIC_RELAXED);
- #endif
- }
-
- /**
- * @brief Safe floating point rmw atomic max.
- *
- * Performs a relaxed read-modify-write floating point atomic max with
- * device memory scope. The original value at \p addr is returned and
- * the value at \p addr is replaced by \p val if greater.
- *
- * @note This operation ensures that, on all targets, we produce safe atomics.
- * This will be the case even when -munsafe-fp-atomics is passed into the compiler.
- *
- * @param [in,out] addr Pointer to value to be updated
- * @param [in] val Value used to update the value at \p addr.
- * @return Original value contained in \p addr.
- */
- __device__ inline float safeAtomicMax(float* addr, float val) {
- #if __has_builtin(__hip_atomic_load) && \
- __has_builtin(__hip_atomic_compare_exchange_strong)
- float value = __hip_atomic_load(addr, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
- bool done = false;
- while (!done && value < val) {
- done = __hip_atomic_compare_exchange_strong(addr, &value, val,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
- }
- return value;
- #else
- unsigned int *uaddr = (unsigned int *)addr;
- unsigned int value = __atomic_load_n(uaddr, __ATOMIC_RELAXED);
- bool done = false;
- while (!done && __uint_as_float(value) < val) {
- done = __atomic_compare_exchange_n(uaddr, &value, __float_as_uint(val), false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
- }
- return __uint_as_float(value);
- #endif
- }
-
- /**
- * @brief Safe floating point rmw atomic min.
- *
- * Performs a relaxed read-modify-write floating point atomic min with
- * device memory scope. The original value at \p addr is returned and
- * the value at \p addr is replaced by \p val if lesser.
- *
- * @note This operation ensures that, on all targets, we produce safe atomics.
- * This will be the case even when -munsafe-fp-atomics is passed into the compiler.
- *
- * @param [in,out] addr Pointer to value to be updated
- * @param [in] val Value used to update the value at \p addr.
- * @return Original value contained in \p addr.
- */
- __device__ inline float safeAtomicMin(float* addr, float val) {
- #if __has_builtin(__hip_atomic_load) && \
- __has_builtin(__hip_atomic_compare_exchange_strong)
- float value = __hip_atomic_load(addr, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
- bool done = false;
- while (!done && value > val) {
- done = __hip_atomic_compare_exchange_strong(addr, &value, val,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
- }
- return value;
- #else
- unsigned int *uaddr = (unsigned int *)addr;
- unsigned int value = __atomic_load_n(uaddr, __ATOMIC_RELAXED);
- bool done = false;
- while (!done && __uint_as_float(value) > val) {
- done = __atomic_compare_exchange_n(uaddr, &value, __float_as_uint(val), false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
- }
- return __uint_as_float(value);
- #endif
- }
-
- /**
- * @brief Safe double precision rmw atomic add.
- *
- * Performs a relaxed read-modify-write double precision atomic add with
- * device memory scope. Original value at \p addr is returned and
- * the value of \p addr is updated to have the original value plus \p value
- *
- * @note This operation ensures that, on all targets, we produce safe atomics.
- * This will be the case even when -munsafe-fp-atomics is passed into the compiler.
- *
- * @param [in,out] addr Pointer to value to be increment by \p value.
- * @param [in] value Value by \p addr is to be incremented.
- * @return Original value contained in \p addr.
- */
- __device__ inline double safeAtomicAdd(double* addr, double value) {
- #if defined(__gfx90a__) && __has_builtin(__hip_atomic_fetch_add)
- // On gfx90a, with the __hip_atomic_fetch_add builtin, relaxed system-scope
- // atomics will produce safe CAS loops, but are otherwise not different than
- // agent-scope atomics. This logic is only applicable for gfx90a, and should
- // not be assumed on other architectures.
- return __hip_atomic_fetch_add(addr, value, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
- #elif defined(__gfx90a__)
- // On gfx90a, if we do not have the __hip_atomic_fetch_add builtin, we need to
- // force a CAS loop here.
- double old_val;
- #if __has_builtin(__hip_atomic_load)
- old_val = __hip_atomic_load(addr, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
- #else // !__has_builtin(__hip_atomic_load)
- old_val = __longlong_as_double(__atomic_load_n(reinterpret_cast<unsigned long long*>(addr), __ATOMIC_RELAXED));
- #endif // __has_builtin(__hip_atomic_load)
- double expected, temp;
- do {
- temp = expected = old_val;
- #if __has_builtin(__hip_atomic_compare_exchange_strong)
- __hip_atomic_compare_exchange_strong(addr, &expected, old_val + value, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
- #else // !__has_builtin(__hip_atomic_compare_exchange_strong)
- __atomic_compare_exchange_n(addr, &expected, old_val + value, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
- #endif // __has_builtin(__hip_atomic_compare_exchange_strong)
- old_val = expected;
- } while (__double_as_longlong(temp) != __double_as_longlong(old_val));
- return old_val;
- #else // !defined(__gfx90a__)
- #if __has_builtin(__hip_atomic_fetch_add)
- return __hip_atomic_fetch_add(addr, value, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
- #else // !__has_builtin(__hip_atomic_fetch_add)
- return __atomic_fetch_add(addr, value, __ATOMIC_RELAXED);
- #endif // __has_builtin(__hip_atomic_fetch_add)
- #endif
- }
-
- /**
- * @brief Safe double precision rmw atomic max.
- *
- * Performs a relaxed read-modify-write double precision atomic max with
- * device memory scope. Original value at \p addr is returned and
- * the value of \p addr is updated with \p val if greater.
- *
- * @note This operation ensures that, on all targets, we produce safe atomics.
- * This will be the case even when -munsafe-fp-atomics is passed into the compiler.
- *
- * @param [in,out] addr Pointer to value to be updated.
- * @param [in] val Value used to updated the contents at \p addr
- * @return Original value contained at \p addr.
- */
- __device__ inline double safeAtomicMax(double* addr, double val) {
- #if __has_builtin(__builtin_amdgcn_is_private)
- if (__builtin_amdgcn_is_private(
- (const __attribute__((address_space(0))) void*)addr)) {
- double old = *addr;
- *addr = __builtin_fmax(old, val);
- return old;
- } else {
- #endif
- #if __has_builtin(__hip_atomic_load) && \
- __has_builtin(__hip_atomic_compare_exchange_strong)
- double value = __hip_atomic_load(addr, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
- bool done = false;
- while (!done && value < val) {
- done = __hip_atomic_compare_exchange_strong(addr, &value, val,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
- }
- return value;
- #else
- unsigned long long *uaddr = (unsigned long long *)addr;
- unsigned long long value = __atomic_load_n(uaddr, __ATOMIC_RELAXED);
- bool done = false;
- while (!done && __longlong_as_double(value) < val) {
- done = __atomic_compare_exchange_n(uaddr, &value, __double_as_longlong(val), false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
- }
- return __longlong_as_double(value);
- #endif
- #if __has_builtin(__builtin_amdgcn_is_private)
- }
- #endif
- }
-
- /**
- * @brief Safe double precision rmw atomic min.
- *
- * Performs a relaxed read-modify-write double precision atomic min with
- * device memory scope. Original value at \p addr is returned and
- * the value of \p addr is updated with \p val if lesser.
- *
- * @note This operation ensures that, on all targets, we produce safe atomics.
- * This will be the case even when -munsafe-fp-atomics is passed into the compiler.
- *
- * @param [in,out] addr Pointer to value to be updated.
- * @param [in] val Value used to updated the contents at \p addr
- * @return Original value contained at \p addr.
- */
- __device__ inline double safeAtomicMin(double* addr, double val) {
- #if __has_builtin(__builtin_amdgcn_is_private)
- if (__builtin_amdgcn_is_private(
- (const __attribute__((address_space(0))) void*)addr)) {
- double old = *addr;
- *addr = __builtin_fmin(old, val);
- return old;
- } else {
- #endif
- #if __has_builtin(__hip_atomic_load) && \
- __has_builtin(__hip_atomic_compare_exchange_strong)
- double value = __hip_atomic_load(addr, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
- bool done = false;
- while (!done && value > val) {
- done = __hip_atomic_compare_exchange_strong(addr, &value, val,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
- }
- return value;
- #else
- unsigned long long *uaddr = (unsigned long long *)addr;
- unsigned long long value = __atomic_load_n(uaddr, __ATOMIC_RELAXED);
- bool done = false;
- while (!done && __longlong_as_double(value) > val) {
- done = __atomic_compare_exchange_n(uaddr, &value, __double_as_longlong(val), false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
- }
- return __longlong_as_double(value);
- #endif
- #if __has_builtin(__builtin_amdgcn_is_private)
- }
- #endif
- }
-
- #endif
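
The hunk above removes HIP's unsafe/safe floating-point atomic helpers along with the other bundled AMD HIP, HSA, and roctracer headers. For orientation only, here is a minimal sketch of how these helpers are typically called from a HIP kernel. The kernel, buffer names, and index arithmetic are hypothetical; it assumes a ROCm/HIP toolchain that still ships this header (after this release, the wheel no longer does).

#include <hip/hip_runtime.h>  // on AMD targets this pulls in the atomic helpers

// Hypothetical kernel: accumulate `data` into two running sums, once with the
// always-correct helper and once with the fast-but-restricted one.
__global__ void accumulate(float* sums, const float* data, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    // safeAtomicAdd: falls back to a CAS loop on targets whose hardware FP
    // atomics are unsafe, so the result is correct even under
    // -munsafe-fp-atomics.
    safeAtomicAdd(&sums[0], data[i]);
    // unsafeAtomicAdd: may emit fast hardware FP atomics (e.g. on gfx90a);
    // per the doc comments above, \p addr must be 4-byte aligned and, for
    // global addresses, in a coarse-grain allocation, or behavior is
    // undefined.
    unsafeAtomicAdd(&sums[1], data[i]);
  }
}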