triton-windows 3.3.1.post19__cp311-cp311-win_amd64.whl → 3.3.1.post21__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of triton-windows might be problematic.

Files changed (108)
  1. triton/_C/libtriton.pyd +0 -0
  2. triton/backends/amd/driver.py +6 -1
  3. triton/backends/nvidia/compiler.py +1 -3
  4. triton/backends/nvidia/driver.py +7 -3
  5. triton/runtime/autotuner.py +2 -2
  6. triton/runtime/build.py +5 -5
  7. triton/windows_utils.py +11 -4
  8. {triton_windows-3.3.1.post19.dist-info → triton_windows-3.3.1.post21.dist-info}/METADATA +1 -1
  9. {triton_windows-3.3.1.post19.dist-info → triton_windows-3.3.1.post21.dist-info}/RECORD +11 -108
  10. triton/backends/amd/include/hip/amd_detail/amd_channel_descriptor.h +0 -358
  11. triton/backends/amd/include/hip/amd_detail/amd_device_functions.h +0 -1010
  12. triton/backends/amd/include/hip/amd_detail/amd_hip_atomic.h +0 -1638
  13. triton/backends/amd/include/hip/amd_detail/amd_hip_bf16.h +0 -1814
  14. triton/backends/amd/include/hip/amd_detail/amd_hip_bfloat16.h +0 -293
  15. triton/backends/amd/include/hip/amd_detail/amd_hip_common.h +0 -32
  16. triton/backends/amd/include/hip/amd_detail/amd_hip_complex.h +0 -174
  17. triton/backends/amd/include/hip/amd_detail/amd_hip_cooperative_groups.h +0 -835
  18. triton/backends/amd/include/hip/amd_detail/amd_hip_fp16.h +0 -1809
  19. triton/backends/amd/include/hip/amd_detail/amd_hip_fp8.h +0 -1391
  20. triton/backends/amd/include/hip/amd_detail/amd_hip_gl_interop.h +0 -108
  21. triton/backends/amd/include/hip/amd_detail/amd_hip_math_constants.h +0 -124
  22. triton/backends/amd/include/hip/amd_detail/amd_hip_runtime.h +0 -405
  23. triton/backends/amd/include/hip/amd_detail/amd_hip_runtime_pt_api.h +0 -196
  24. triton/backends/amd/include/hip/amd_detail/amd_hip_unsafe_atomics.h +0 -565
  25. triton/backends/amd/include/hip/amd_detail/amd_hip_vector_types.h +0 -2226
  26. triton/backends/amd/include/hip/amd_detail/amd_math_functions.h +0 -104
  27. triton/backends/amd/include/hip/amd_detail/amd_surface_functions.h +0 -244
  28. triton/backends/amd/include/hip/amd_detail/amd_warp_functions.h +0 -538
  29. triton/backends/amd/include/hip/amd_detail/amd_warp_sync_functions.h +0 -288
  30. triton/backends/amd/include/hip/amd_detail/concepts.hpp +0 -30
  31. triton/backends/amd/include/hip/amd_detail/device_library_decls.h +0 -133
  32. triton/backends/amd/include/hip/amd_detail/functional_grid_launch.hpp +0 -218
  33. triton/backends/amd/include/hip/amd_detail/grid_launch.h +0 -67
  34. triton/backends/amd/include/hip/amd_detail/grid_launch.hpp +0 -50
  35. triton/backends/amd/include/hip/amd_detail/grid_launch_GGL.hpp +0 -26
  36. triton/backends/amd/include/hip/amd_detail/helpers.hpp +0 -137
  37. triton/backends/amd/include/hip/amd_detail/hip_api_trace.hpp +0 -1446
  38. triton/backends/amd/include/hip/amd_detail/hip_assert.h +0 -101
  39. triton/backends/amd/include/hip/amd_detail/hip_cooperative_groups_helper.h +0 -242
  40. triton/backends/amd/include/hip/amd_detail/hip_fp16_gcc.h +0 -254
  41. triton/backends/amd/include/hip/amd_detail/hip_fp16_math_fwd.h +0 -96
  42. triton/backends/amd/include/hip/amd_detail/hip_ldg.h +0 -100
  43. triton/backends/amd/include/hip/amd_detail/hip_prof_str.h +0 -10570
  44. triton/backends/amd/include/hip/amd_detail/hip_runtime_prof.h +0 -78
  45. triton/backends/amd/include/hip/amd_detail/host_defines.h +0 -184
  46. triton/backends/amd/include/hip/amd_detail/hsa_helpers.hpp +0 -102
  47. triton/backends/amd/include/hip/amd_detail/macro_based_grid_launch.hpp +0 -798
  48. triton/backends/amd/include/hip/amd_detail/math_fwd.h +0 -698
  49. triton/backends/amd/include/hip/amd_detail/ockl_image.h +0 -177
  50. triton/backends/amd/include/hip/amd_detail/program_state.hpp +0 -107
  51. triton/backends/amd/include/hip/amd_detail/texture_fetch_functions.h +0 -491
  52. triton/backends/amd/include/hip/amd_detail/texture_indirect_functions.h +0 -478
  53. triton/backends/amd/include/hip/channel_descriptor.h +0 -39
  54. triton/backends/amd/include/hip/device_functions.h +0 -38
  55. triton/backends/amd/include/hip/driver_types.h +0 -468
  56. triton/backends/amd/include/hip/hip_bf16.h +0 -36
  57. triton/backends/amd/include/hip/hip_bfloat16.h +0 -44
  58. triton/backends/amd/include/hip/hip_common.h +0 -100
  59. triton/backends/amd/include/hip/hip_complex.h +0 -38
  60. triton/backends/amd/include/hip/hip_cooperative_groups.h +0 -46
  61. triton/backends/amd/include/hip/hip_deprecated.h +0 -95
  62. triton/backends/amd/include/hip/hip_ext.h +0 -161
  63. triton/backends/amd/include/hip/hip_fp16.h +0 -36
  64. triton/backends/amd/include/hip/hip_fp8.h +0 -33
  65. triton/backends/amd/include/hip/hip_gl_interop.h +0 -32
  66. triton/backends/amd/include/hip/hip_hcc.h +0 -24
  67. triton/backends/amd/include/hip/hip_math_constants.h +0 -36
  68. triton/backends/amd/include/hip/hip_profile.h +0 -27
  69. triton/backends/amd/include/hip/hip_runtime.h +0 -75
  70. triton/backends/amd/include/hip/hip_runtime_api.h +0 -9261
  71. triton/backends/amd/include/hip/hip_texture_types.h +0 -29
  72. triton/backends/amd/include/hip/hip_vector_types.h +0 -41
  73. triton/backends/amd/include/hip/hip_version.h +0 -17
  74. triton/backends/amd/include/hip/hiprtc.h +0 -421
  75. triton/backends/amd/include/hip/library_types.h +0 -78
  76. triton/backends/amd/include/hip/math_functions.h +0 -42
  77. triton/backends/amd/include/hip/surface_types.h +0 -63
  78. triton/backends/amd/include/hip/texture_types.h +0 -194
  79. triton/backends/amd/include/hsa/Brig.h +0 -1131
  80. triton/backends/amd/include/hsa/amd_hsa_common.h +0 -91
  81. triton/backends/amd/include/hsa/amd_hsa_elf.h +0 -462
  82. triton/backends/amd/include/hsa/amd_hsa_kernel_code.h +0 -269
  83. triton/backends/amd/include/hsa/amd_hsa_queue.h +0 -109
  84. triton/backends/amd/include/hsa/amd_hsa_signal.h +0 -80
  85. triton/backends/amd/include/hsa/hsa.h +0 -5738
  86. triton/backends/amd/include/hsa/hsa_amd_tool.h +0 -91
  87. triton/backends/amd/include/hsa/hsa_api_trace.h +0 -579
  88. triton/backends/amd/include/hsa/hsa_api_trace_version.h +0 -68
  89. triton/backends/amd/include/hsa/hsa_ext_amd.h +0 -3146
  90. triton/backends/amd/include/hsa/hsa_ext_finalize.h +0 -531
  91. triton/backends/amd/include/hsa/hsa_ext_image.h +0 -1454
  92. triton/backends/amd/include/hsa/hsa_ven_amd_aqlprofile.h +0 -488
  93. triton/backends/amd/include/hsa/hsa_ven_amd_loader.h +0 -667
  94. triton/backends/amd/include/hsa/hsa_ven_amd_pc_sampling.h +0 -416
  95. triton/backends/amd/include/roctracer/ext/prof_protocol.h +0 -107
  96. triton/backends/amd/include/roctracer/hip_ostream_ops.h +0 -4515
  97. triton/backends/amd/include/roctracer/hsa_ostream_ops.h +0 -1727
  98. triton/backends/amd/include/roctracer/hsa_prof_str.h +0 -3059
  99. triton/backends/amd/include/roctracer/roctracer.h +0 -779
  100. triton/backends/amd/include/roctracer/roctracer_ext.h +0 -81
  101. triton/backends/amd/include/roctracer/roctracer_hcc.h +0 -24
  102. triton/backends/amd/include/roctracer/roctracer_hip.h +0 -37
  103. triton/backends/amd/include/roctracer/roctracer_hsa.h +0 -112
  104. triton/backends/amd/include/roctracer/roctracer_plugin.h +0 -137
  105. triton/backends/amd/include/roctracer/roctracer_roctx.h +0 -67
  106. triton/backends/amd/include/roctracer/roctx.h +0 -229
  107. {triton_windows-3.3.1.post19.dist-info → triton_windows-3.3.1.post21.dist-info}/WHEEL +0 -0
  108. {triton_windows-3.3.1.post19.dist-info → triton_windows-3.3.1.post21.dist-info}/top_level.txt +0 -0
triton/backends/amd/include/hip/amd_detail/amd_hip_cooperative_groups.h
@@ -1,835 +0,0 @@
- /*
- Copyright (c) 2015 - 2023 Advanced Micro Devices, Inc. All rights reserved.
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in
- all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- THE SOFTWARE.
- */
-
- /**
- * @file amd_detail/hip_cooperative_groups.h
- *
- * @brief Device side implementation of `Cooperative Group` feature.
- *
- * Defines new types and device API wrappers related to `Cooperative Group`
- * feature, which the programmer can directly use in his kernel(s) in order to
- * make use of this feature.
- */
- #ifndef HIP_INCLUDE_HIP_AMD_DETAIL_HIP_COOPERATIVE_GROUPS_H
- #define HIP_INCLUDE_HIP_AMD_DETAIL_HIP_COOPERATIVE_GROUPS_H
-
- #if __cplusplus
- #if !defined(__HIPCC_RTC__)
- #include <hip/amd_detail/hip_cooperative_groups_helper.h>
- #endif
-
- namespace cooperative_groups {
-
- /** @brief The base type of all cooperative group types
- *
- * \details Holds the key properties of a constructed cooperative group types
- * object, like the group type, its size, etc
- *
- * @note Cooperative groups feature is implemented on Linux, under developement
- * on Windows.
- */
- class thread_group {
- protected:
- uint32_t _type; // thread_group type
- uint32_t _size; // total number of threads in the tread_group
- uint64_t _mask; // Lanemask for coalesced and tiled partitioned group types,
- // LSB represents lane 0, and MSB represents lane 63
-
- // Construct a thread group, and set thread group type and other essential
- // thread group properties. This generic thread group is directly constructed
- // only when the group is supposed to contain only the calling the thread
- // (throurh the API - `this_thread()`), and in all other cases, this thread
- // group object is a sub-object of some other derived thread group object
- __CG_QUALIFIER__ thread_group(internal::group_type type, uint32_t size = static_cast<uint64_t>(0),
- uint64_t mask = static_cast<uint64_t>(0)) {
- _type = type;
- _size = size;
- _mask = mask;
- }
-
- struct _tiled_info {
- bool is_tiled;
- unsigned int size;
- unsigned int meta_group_rank;
- unsigned int meta_group_size;
- };
-
- struct _coalesced_info {
- lane_mask member_mask;
- unsigned int size;
- struct _tiled_info tiled_info;
- } coalesced_info;
-
- friend __CG_QUALIFIER__ thread_group tiled_partition(const thread_group& parent,
- unsigned int tile_size);
- friend class thread_block;
-
- public:
- // Total number of threads in the thread group, and this serves the purpose
- // for all derived cooperative group types since their `size` is directly
- // saved during the construction
- __CG_QUALIFIER__ uint32_t size() const { return _size; }
- __CG_QUALIFIER__ unsigned int cg_type() const { return _type; }
- // Rank of the calling thread within [0, size())
- __CG_QUALIFIER__ uint32_t thread_rank() const;
- // Is this cooperative group type valid?
- __CG_QUALIFIER__ bool is_valid() const;
- // synchronize the threads in the thread group
- __CG_QUALIFIER__ void sync() const;
- };
- /**
- *-------------------------------------------------------------------------------------------------
- *-------------------------------------------------------------------------------------------------
- * @defgroup CooperativeG Cooperative Groups
- * @ingroup API
- * @{
- * This section describes the cooperative groups functions of HIP runtime API.
- *
- * The cooperative groups provides flexible thread parallel programming algorithms, threads
- * cooperate and share data to perform collective computations.
- *
- * @note Cooperative groups feature is implemented on Linux, under developement
- * on Windows.
- *
- */
- /** \brief The multi-grid cooperative group type
- *
- * \details Represents an inter-device cooperative group type where the
- * participating threads within the group spans across multple
- * devices, running the (same) kernel on these devices
- * @note The multi-grid cooperative group type is implemented on Linux, under developement
- * on Windows.
- */
- class multi_grid_group : public thread_group {
- // Only these friend functions are allowed to construct an object of this class
- // and access its resources
- friend __CG_QUALIFIER__ multi_grid_group this_multi_grid();
-
- protected:
- // Construct mutli-grid thread group (through the API this_multi_grid())
- explicit __CG_QUALIFIER__ multi_grid_group(uint32_t size)
- : thread_group(internal::cg_multi_grid, size) {}
-
- public:
- // Number of invocations participating in this multi-grid group. In other
- // words, the number of GPUs
- __CG_QUALIFIER__ uint32_t num_grids() { return internal::multi_grid::num_grids(); }
- // Rank of this invocation. In other words, an ID number within the range
- // [0, num_grids()) of the GPU, this kernel is running on
- __CG_QUALIFIER__ uint32_t grid_rank() { return internal::multi_grid::grid_rank(); }
- __CG_QUALIFIER__ uint32_t thread_rank() const { return internal::multi_grid::thread_rank(); }
- __CG_QUALIFIER__ bool is_valid() const { return internal::multi_grid::is_valid(); }
- __CG_QUALIFIER__ void sync() const { internal::multi_grid::sync(); }
- };
-
- /** @brief User exposed API interface to construct multi-grid cooperative
- * group type object - `multi_grid_group`
- *
- * \details User is not allowed to directly construct an object of type
- * `multi_grid_group`. Instead, he should construct it through this
- * API function
- * @note This multi-grid cooperative API type is implemented on Linux, under developement
- * on Windows.
- */
- __CG_QUALIFIER__ multi_grid_group this_multi_grid() {
- return multi_grid_group(internal::multi_grid::size());
- }
-
- /** @brief The grid cooperative group type
- *
- * \details Represents an inter-workgroup cooperative group type where the
- * participating threads within the group spans across multiple
- * workgroups running the (same) kernel on the same device
- * @note This is implemented on Linux, under developement
- * on Windows.
- */
- class grid_group : public thread_group {
- // Only these friend functions are allowed to construct an object of this class
- // and access its resources
- friend __CG_QUALIFIER__ grid_group this_grid();
-
- protected:
- // Construct grid thread group (through the API this_grid())
- explicit __CG_QUALIFIER__ grid_group(uint32_t size) : thread_group(internal::cg_grid, size) {}
-
- public:
- __CG_QUALIFIER__ uint32_t thread_rank() const { return internal::grid::thread_rank(); }
- __CG_QUALIFIER__ bool is_valid() const { return internal::grid::is_valid(); }
- __CG_QUALIFIER__ void sync() const { internal::grid::sync(); }
- };
-
- /** @brief User exposed API interface to construct grid cooperative group type
- * object - `grid_group`
- *
- * \details User is not allowed to directly construct an object of type
- * `multi_grid_group`. Instead, he should construct it through this
- * API function
- * @note This function is implemented on Linux, under developement
- * on Windows.
- */
- __CG_QUALIFIER__ grid_group this_grid() { return grid_group(internal::grid::size()); }
-
- /** @brief The workgroup (thread-block in CUDA terminology) cooperative group
- * type
- *
- * \details Represents an intra-workgroup cooperative group type where the
- * participating threads within the group are exactly the same threads
- * which are participated in the currently executing `workgroup`
- * @note This is implemented on Linux, under developement
- * on Windows.
- */
- class thread_block : public thread_group {
- // Only these friend functions are allowed to construct an object of thi
- // class and access its resources
- friend __CG_QUALIFIER__ thread_block this_thread_block();
- friend __CG_QUALIFIER__ thread_group tiled_partition(const thread_group& parent,
- unsigned int tile_size);
- friend __CG_QUALIFIER__ thread_group tiled_partition(const thread_block& parent,
- unsigned int tile_size);
- protected:
- // Construct a workgroup thread group (through the API this_thread_block())
- explicit __CG_QUALIFIER__ thread_block(uint32_t size)
- : thread_group(internal::cg_workgroup, size) {}
-
- __CG_QUALIFIER__ thread_group new_tiled_group(unsigned int tile_size) const {
- const bool pow2 = ((tile_size & (tile_size - 1)) == 0);
- // Invalid tile size, assert
- if (!tile_size || (tile_size > __AMDGCN_WAVEFRONT_SIZE) || !pow2) {
- __hip_assert(false && "invalid tile size");
- }
-
- auto block_size = size();
- auto rank = thread_rank();
- auto partitions = (block_size + tile_size - 1) / tile_size;
- auto tail = (partitions * tile_size) - block_size;
- auto partition_size = tile_size - tail * (rank >= (partitions - 1) * tile_size);
- thread_group tiledGroup = thread_group(internal::cg_tiled_group, partition_size);
-
- tiledGroup.coalesced_info.tiled_info.size = tile_size;
- tiledGroup.coalesced_info.tiled_info.is_tiled = true;
- tiledGroup.coalesced_info.tiled_info.meta_group_rank = rank / tile_size;
- tiledGroup.coalesced_info.tiled_info.meta_group_size = partitions;
- return tiledGroup;
- }
-
- public:
- // 3-dimensional block index within the grid
- __CG_STATIC_QUALIFIER__ dim3 group_index() { return internal::workgroup::group_index(); }
- // 3-dimensional thread index within the block
- __CG_STATIC_QUALIFIER__ dim3 thread_index() { return internal::workgroup::thread_index(); }
- __CG_STATIC_QUALIFIER__ uint32_t thread_rank() { return internal::workgroup::thread_rank(); }
- __CG_STATIC_QUALIFIER__ uint32_t size() { return internal::workgroup::size(); }
- __CG_STATIC_QUALIFIER__ bool is_valid() { return internal::workgroup::is_valid(); }
- __CG_STATIC_QUALIFIER__ void sync() { internal::workgroup::sync(); }
- __CG_QUALIFIER__ dim3 group_dim() { return internal::workgroup::block_dim(); }
- };
-
- /** \brief User exposed API interface to construct workgroup cooperative
- * group type object - `thread_block`.
- *
- * \details User is not allowed to directly construct an object of type
- * `thread_block`. Instead, he should construct it through this API
- * function.
- * @note This function is implemented on Linux, under developement
- * on Windows.
- */
- __CG_QUALIFIER__ thread_block this_thread_block() {
- return thread_block(internal::workgroup::size());
- }
-
- /** \brief The tiled_group cooperative group type
- *
- * \details Represents one tiled thread group in a wavefront.
- * This group type also supports sub-wave level intrinsics.
- * @note This is implemented on Linux, under developement
- * on Windows.
- */
-
- class tiled_group : public thread_group {
- private:
- friend __CG_QUALIFIER__ thread_group tiled_partition(const thread_group& parent,
- unsigned int tile_size);
- friend __CG_QUALIFIER__ tiled_group tiled_partition(const tiled_group& parent,
- unsigned int tile_size);
-
- __CG_QUALIFIER__ tiled_group new_tiled_group(unsigned int tile_size) const {
- const bool pow2 = ((tile_size & (tile_size - 1)) == 0);
-
- if (!tile_size || (tile_size > __AMDGCN_WAVEFRONT_SIZE) || !pow2) {
- __hip_assert(false && "invalid tile size");
- }
-
- if (size() <= tile_size) {
- return *this;
- }
-
- tiled_group tiledGroup = tiled_group(tile_size);
- tiledGroup.coalesced_info.tiled_info.is_tiled = true;
- return tiledGroup;
- }
-
- protected:
- explicit __CG_QUALIFIER__ tiled_group(unsigned int tileSize)
- : thread_group(internal::cg_tiled_group, tileSize) {
- coalesced_info.tiled_info.size = tileSize;
- coalesced_info.tiled_info.is_tiled = true;
- }
-
- public:
- __CG_QUALIFIER__ unsigned int size() const { return (coalesced_info.tiled_info.size); }
-
- __CG_QUALIFIER__ unsigned int thread_rank() const {
- return (internal::workgroup::thread_rank() & (coalesced_info.tiled_info.size - 1));
- }
-
- __CG_QUALIFIER__ void sync() const {
- internal::tiled_group::sync();
- }
- };
-
- /** \brief The coalesced_group cooperative group type
- *
- * \details Represents a active thread group in a wavefront.
- * This group type also supports sub-wave level intrinsics.
- * @note This is implemented on Linux, under developement
- * on Windows.
- */
- class coalesced_group : public thread_group {
- private:
- friend __CG_QUALIFIER__ coalesced_group coalesced_threads();
- friend __CG_QUALIFIER__ thread_group tiled_partition(const thread_group& parent, unsigned int tile_size);
- friend __CG_QUALIFIER__ coalesced_group tiled_partition(const coalesced_group& parent, unsigned int tile_size);
-
- __CG_QUALIFIER__ coalesced_group new_tiled_group(unsigned int tile_size) const {
- const bool pow2 = ((tile_size & (tile_size - 1)) == 0);
-
- if (!tile_size || (tile_size > size()) || !pow2) {
- return coalesced_group(0);
- }
-
- // If a tiled group is passed to be partitioned further into a coalesced_group.
- // prepare a mask for further partitioning it so that it stays coalesced.
- if (coalesced_info.tiled_info.is_tiled) {
- unsigned int base_offset = (thread_rank() & (~(tile_size - 1)));
- unsigned int masklength = min(static_cast<unsigned int>(size()) - base_offset, tile_size);
- lane_mask member_mask = static_cast<lane_mask>(-1) >> (__AMDGCN_WAVEFRONT_SIZE - masklength);
-
- member_mask <<= (__lane_id() & ~(tile_size - 1));
- coalesced_group coalesced_tile = coalesced_group(member_mask);
- coalesced_tile.coalesced_info.tiled_info.is_tiled = true;
- coalesced_tile.coalesced_info.tiled_info.meta_group_rank = thread_rank() / tile_size;
- coalesced_tile.coalesced_info.tiled_info.meta_group_size = size() / tile_size;
- return coalesced_tile;
- }
- // Here the parent coalesced_group is not partitioned.
- else {
- lane_mask member_mask = 0;
- unsigned int tile_rank = 0;
- int lanes_to_skip = ((thread_rank()) / tile_size) * tile_size;
-
- for (unsigned int i = 0; i < __AMDGCN_WAVEFRONT_SIZE; i++) {
- lane_mask active = coalesced_info.member_mask & (1 << i);
- // Make sure the lane is active
- if (active) {
- if (lanes_to_skip <= 0 && tile_rank < tile_size) {
- // Prepare a member_mask that is appropriate for a tile
- member_mask |= active;
- tile_rank++;
- }
- lanes_to_skip--;
- }
- }
- coalesced_group coalesced_tile = coalesced_group(member_mask);
- coalesced_tile.coalesced_info.tiled_info.meta_group_rank = thread_rank() / tile_size;
- coalesced_tile.coalesced_info.tiled_info.meta_group_size =
- (size() + tile_size - 1) / tile_size;
- return coalesced_tile;
- }
- return coalesced_group(0);
- }
-
- protected:
- // Constructor
- explicit __CG_QUALIFIER__ coalesced_group(lane_mask member_mask)
- : thread_group(internal::cg_coalesced_group) {
- coalesced_info.member_mask = member_mask; // Which threads are active
- coalesced_info.size = __popcll(coalesced_info.member_mask); // How many threads are active
- coalesced_info.tiled_info.is_tiled = false; // Not a partitioned group
- coalesced_info.tiled_info.meta_group_rank = 0;
- coalesced_info.tiled_info.meta_group_size = 1;
- }
-
- public:
- __CG_QUALIFIER__ unsigned int size() const {
- return coalesced_info.size;
- }
-
- __CG_QUALIFIER__ unsigned int thread_rank() const {
- return internal::coalesced_group::masked_bit_count(coalesced_info.member_mask);
- }
-
- __CG_QUALIFIER__ void sync() const {
- internal::coalesced_group::sync();
- }
-
- __CG_QUALIFIER__ unsigned int meta_group_rank() const {
- return coalesced_info.tiled_info.meta_group_rank;
- }
-
- __CG_QUALIFIER__ unsigned int meta_group_size() const {
- return coalesced_info.tiled_info.meta_group_size;
- }
-
- template <class T>
- __CG_QUALIFIER__ T shfl(T var, int srcRank) const {
- static_assert(is_valid_type<T>::value, "Neither an integer or float type.");
-
- srcRank = srcRank % static_cast<int>(size());
-
- int lane = (size() == __AMDGCN_WAVEFRONT_SIZE) ? srcRank
- : (__AMDGCN_WAVEFRONT_SIZE == 64) ? __fns64(coalesced_info.member_mask, 0, (srcRank + 1))
- : __fns32(coalesced_info.member_mask, 0, (srcRank + 1));
-
- return __shfl(var, lane, __AMDGCN_WAVEFRONT_SIZE);
- }
-
- template <class T>
- __CG_QUALIFIER__ T shfl_down(T var, unsigned int lane_delta) const {
- static_assert(is_valid_type<T>::value, "Neither an integer or float type.");
-
- // Note: The cuda implementation appears to use the remainder of lane_delta
- // and WARP_SIZE as the shift value rather than lane_delta itself.
- // This is not described in the documentation and is not done here.
-
- if (size() == __AMDGCN_WAVEFRONT_SIZE) {
- return __shfl_down(var, lane_delta, __AMDGCN_WAVEFRONT_SIZE);
- }
-
- int lane;
- if (__AMDGCN_WAVEFRONT_SIZE == 64) {
- lane = __fns64(coalesced_info.member_mask, __lane_id(), lane_delta + 1);
- }
- else {
- lane = __fns32(coalesced_info.member_mask, __lane_id(), lane_delta + 1);
- }
-
- if (lane == -1) {
- lane = __lane_id();
- }
-
- return __shfl(var, lane, __AMDGCN_WAVEFRONT_SIZE);
- }
-
- template <class T>
- __CG_QUALIFIER__ T shfl_up(T var, unsigned int lane_delta) const {
- static_assert(is_valid_type<T>::value, "Neither an integer or float type.");
-
- // Note: The cuda implementation appears to use the remainder of lane_delta
- // and WARP_SIZE as the shift value rather than lane_delta itself.
- // This is not described in the documentation and is not done here.
-
- if (size() == __AMDGCN_WAVEFRONT_SIZE) {
- return __shfl_up(var, lane_delta, __AMDGCN_WAVEFRONT_SIZE);
- }
-
- int lane;
- if (__AMDGCN_WAVEFRONT_SIZE == 64) {
- lane = __fns64(coalesced_info.member_mask, __lane_id(), -(lane_delta + 1));
- }
- else if (__AMDGCN_WAVEFRONT_SIZE == 32) {
- lane = __fns32(coalesced_info.member_mask, __lane_id(), -(lane_delta + 1));
- }
-
- if (lane == -1) {
- lane = __lane_id();
- }
-
- return __shfl(var, lane, __AMDGCN_WAVEFRONT_SIZE);
- }
- };
-
- /** \brief User exposed API to create coalesced groups.
- *
- * \details A collective operation that groups all active lanes into a new thread group.
- * @note This function is implemented on Linux, under developement
- * on Windows.
- */
-
- __CG_QUALIFIER__ coalesced_group coalesced_threads() {
- return cooperative_groups::coalesced_group(__builtin_amdgcn_read_exec());
- }
-
- /**
- * Implemenation of all publicly exposed base class APIs
- * @note This function is implemented on Linux, under developement
- * on Windows.
- */
- __CG_QUALIFIER__ uint32_t thread_group::thread_rank() const {
- switch (this->_type) {
- case internal::cg_multi_grid: {
- return (static_cast<const multi_grid_group*>(this)->thread_rank());
- }
- case internal::cg_grid: {
- return (static_cast<const grid_group*>(this)->thread_rank());
- }
- case internal::cg_workgroup: {
- return (static_cast<const thread_block*>(this)->thread_rank());
- }
- case internal::cg_tiled_group: {
- return (static_cast<const tiled_group*>(this)->thread_rank());
- }
- case internal::cg_coalesced_group: {
- return (static_cast<const coalesced_group*>(this)->thread_rank());
- }
- default: {
- __hip_assert(false && "invalid cooperative group type");
- return -1;
- }
- }
- }
- /**
- * Implemenation of all publicly exposed thread group API
- * @note This function is implemented on Linux, under developement
- * on Windows.
- */
- __CG_QUALIFIER__ bool thread_group::is_valid() const {
- switch (this->_type) {
- case internal::cg_multi_grid: {
- return (static_cast<const multi_grid_group*>(this)->is_valid());
- }
- case internal::cg_grid: {
- return (static_cast<const grid_group*>(this)->is_valid());
- }
- case internal::cg_workgroup: {
- return (static_cast<const thread_block*>(this)->is_valid());
- }
- case internal::cg_tiled_group: {
- return (static_cast<const tiled_group*>(this)->is_valid());
- }
- case internal::cg_coalesced_group: {
- return (static_cast<const coalesced_group*>(this)->is_valid());
- }
- default: {
- __hip_assert(false && "invalid cooperative group type");
- return false;
- }
- }
- }
- /**
- * Implemenation of all publicly exposed thread group sync API
- * @note This function is implemented on Linux, under developement
- * on Windows.
- */
- __CG_QUALIFIER__ void thread_group::sync() const {
- switch (this->_type) {
- case internal::cg_multi_grid: {
- static_cast<const multi_grid_group*>(this)->sync();
- break;
- }
- case internal::cg_grid: {
- static_cast<const grid_group*>(this)->sync();
- break;
- }
- case internal::cg_workgroup: {
- static_cast<const thread_block*>(this)->sync();
- break;
- }
- case internal::cg_tiled_group: {
- static_cast<const tiled_group*>(this)->sync();
- break;
- }
- case internal::cg_coalesced_group: {
- static_cast<const coalesced_group*>(this)->sync();
- break;
- }
- default: {
- __hip_assert(false && "invalid cooperative group type");
- }
- }
- }
-
- /**
- * Implemenation of publicly exposed `wrapper` API on top of basic cooperative
- * group type APIs
- * @note This function is implemented on Linux, under developement
- * on Windows.
- */
- template <class CGTy> __CG_QUALIFIER__ uint32_t group_size(CGTy const& g) { return g.size(); }
- /**
- * Implemenation of publicly exposed `wrapper` API on top of basic cooperative
- * group type APIs
- * @note This function is implemented on Linux, under developement
- * on Windows.
- */
- template <class CGTy> __CG_QUALIFIER__ uint32_t thread_rank(CGTy const& g) {
- return g.thread_rank();
- }
- /**
- * Implemenation of publicly exposed `wrapper` API on top of basic cooperative
- * group type APIs
- * @note This function is implemented on Linux, under developement
- * on Windows.
- */
- template <class CGTy> __CG_QUALIFIER__ bool is_valid(CGTy const& g) { return g.is_valid(); }
- /**
- * Implemenation of publicly exposed `wrapper` API on top of basic cooperative
- * group type APIs
- * @note This function is implemented on Linux, under developement
- * on Windows.
- */
- template <class CGTy> __CG_QUALIFIER__ void sync(CGTy const& g) { g.sync(); }
- /**
- * template class tile_base
- * @note This class is implemented on Linux, under developement
- * on Windows.
- */
- template <unsigned int tileSize> class tile_base {
- protected:
- _CG_STATIC_CONST_DECL_ unsigned int numThreads = tileSize;
-
- public:
- // Rank of the thread within this tile
- _CG_STATIC_CONST_DECL_ unsigned int thread_rank() {
- return (internal::workgroup::thread_rank() & (numThreads - 1));
- }
-
- // Number of threads within this tile
- __CG_STATIC_QUALIFIER__ unsigned int size() { return numThreads; }
- };
- /**
- * template class thread_block_tile_base
- * @note This class is implemented on Linux, under developement
- * on Windows.
- */
- template <unsigned int size> class thread_block_tile_base : public tile_base<size> {
- static_assert(is_valid_tile_size<size>::value,
- "Tile size is either not a power of 2 or greater than the wavefront size");
- using tile_base<size>::numThreads;
-
- public:
- __CG_STATIC_QUALIFIER__ void sync() {
- internal::tiled_group::sync();
- }
-
- template <class T> __CG_QUALIFIER__ T shfl(T var, int srcRank) const {
- static_assert(is_valid_type<T>::value, "Neither an integer or float type.");
- return (__shfl(var, srcRank, numThreads));
- }
-
- template <class T> __CG_QUALIFIER__ T shfl_down(T var, unsigned int lane_delta) const {
- static_assert(is_valid_type<T>::value, "Neither an integer or float type.");
- return (__shfl_down(var, lane_delta, numThreads));
- }
-
- template <class T> __CG_QUALIFIER__ T shfl_up(T var, unsigned int lane_delta) const {
- static_assert(is_valid_type<T>::value, "Neither an integer or float type.");
- return (__shfl_up(var, lane_delta, numThreads));
- }
-
- template <class T> __CG_QUALIFIER__ T shfl_xor(T var, unsigned int laneMask) const {
- static_assert(is_valid_type<T>::value, "Neither an integer or float type.");
- return (__shfl_xor(var, laneMask, numThreads));
- }
- };
- /** \brief User exposed API that captures the state of the parent group pre-partition
- */
- template <unsigned int tileSize, typename ParentCGTy>
- class parent_group_info {
- public:
- // Returns the linear rank of the group within the set of tiles partitioned
- // from a parent group (bounded by meta_group_size)
- __CG_STATIC_QUALIFIER__ unsigned int meta_group_rank() {
- return ParentCGTy::thread_rank() / tileSize;
- }
-
- // Returns the number of groups created when the parent group was partitioned.
- __CG_STATIC_QUALIFIER__ unsigned int meta_group_size() {
- return (ParentCGTy::size() + tileSize - 1) / tileSize;
- }
- };
-
- /** \brief Group type - thread_block_tile
- *
- * \details Represents one tile of thread group.
- * @note This type is implemented on Linux, under developement
- * on Windows.
- */
- template <unsigned int tileSize, class ParentCGTy>
- class thread_block_tile_type : public thread_block_tile_base<tileSize>,
- public tiled_group,
- public parent_group_info<tileSize, ParentCGTy> {
- _CG_STATIC_CONST_DECL_ unsigned int numThreads = tileSize;
- typedef thread_block_tile_base<numThreads> tbtBase;
- protected:
- __CG_QUALIFIER__ thread_block_tile_type() : tiled_group(numThreads) {
- coalesced_info.tiled_info.size = numThreads;
- coalesced_info.tiled_info.is_tiled = true;
- }
- public:
- using tbtBase::size;
- using tbtBase::sync;
- using tbtBase::thread_rank;
- };
-
- // Partial template specialization
- template <unsigned int tileSize>
- class thread_block_tile_type<tileSize, void> : public thread_block_tile_base<tileSize>,
- public tiled_group
- {
- _CG_STATIC_CONST_DECL_ unsigned int numThreads = tileSize;
-
- typedef thread_block_tile_base<numThreads> tbtBase;
-
- protected:
-
- __CG_QUALIFIER__ thread_block_tile_type(unsigned int meta_group_rank, unsigned int meta_group_size)
- : tiled_group(numThreads) {
- coalesced_info.tiled_info.size = numThreads;
- coalesced_info.tiled_info.is_tiled = true;
- coalesced_info.tiled_info.meta_group_rank = meta_group_rank;
- coalesced_info.tiled_info.meta_group_size = meta_group_size;
- }
-
- public:
- using tbtBase::size;
- using tbtBase::sync;
- using tbtBase::thread_rank;
-
- __CG_QUALIFIER__ unsigned int meta_group_rank() const {
- return coalesced_info.tiled_info.meta_group_rank;
- }
-
- __CG_QUALIFIER__ unsigned int meta_group_size() const {
- return coalesced_info.tiled_info.meta_group_size;
- }
- // end of operative group
- /**
- * @}
- */
- };
-
-
- /** \brief User exposed API to partition groups.
- *
- * \details A collective operation that partitions the parent group into a one-dimensional,
- * row-major, tiling of subgroups.
- */
-
- __CG_QUALIFIER__ thread_group tiled_partition(const thread_group& parent, unsigned int tile_size) {
- if (parent.cg_type() == internal::cg_tiled_group) {
- const tiled_group* cg = static_cast<const tiled_group*>(&parent);
- return cg->new_tiled_group(tile_size);
- }
- else if(parent.cg_type() == internal::cg_coalesced_group) {
- const coalesced_group* cg = static_cast<const coalesced_group*>(&parent);
- return cg->new_tiled_group(tile_size);
- }
- else {
- const thread_block* tb = static_cast<const thread_block*>(&parent);
- return tb->new_tiled_group(tile_size);
- }
- }
-
- // Thread block type overload
- __CG_QUALIFIER__ thread_group tiled_partition(const thread_block& parent, unsigned int tile_size) {
- return (parent.new_tiled_group(tile_size));
- }
-
- __CG_QUALIFIER__ tiled_group tiled_partition(const tiled_group& parent, unsigned int tile_size) {
- return (parent.new_tiled_group(tile_size));
- }
-
- // If a coalesced group is passed to be partitioned, it should remain coalesced
- __CG_QUALIFIER__ coalesced_group tiled_partition(const coalesced_group& parent, unsigned int tile_size) {
- return (parent.new_tiled_group(tile_size));
- }
-
- template <unsigned int size, class ParentCGTy> class thread_block_tile;
-
- namespace impl {
- template <unsigned int size, class ParentCGTy> class thread_block_tile_internal;
-
- template <unsigned int size, class ParentCGTy>
- class thread_block_tile_internal : public thread_block_tile_type<size, ParentCGTy> {
- protected:
- template <unsigned int tbtSize, class tbtParentT>
- __CG_QUALIFIER__ thread_block_tile_internal(
- const thread_block_tile_internal<tbtSize, tbtParentT>& g)
- : thread_block_tile_type<size, ParentCGTy>(g.meta_group_rank(), g.meta_group_size()) {}
-
- __CG_QUALIFIER__ thread_block_tile_internal(const thread_block& g)
- : thread_block_tile_type<size, ParentCGTy>() {}
- };
- } // namespace impl
-
- template <unsigned int size, class ParentCGTy>
- class thread_block_tile : public impl::thread_block_tile_internal<size, ParentCGTy> {
- protected:
- __CG_QUALIFIER__ thread_block_tile(const ParentCGTy& g)
- : impl::thread_block_tile_internal<size, ParentCGTy>(g) {}
-
- public:
- __CG_QUALIFIER__ operator thread_block_tile<size, void>() const {
- return thread_block_tile<size, void>(*this);
- }
- };
-
-
- template <unsigned int size>
- class thread_block_tile<size, void> : public impl::thread_block_tile_internal<size, void> {
- template <unsigned int, class ParentCGTy> friend class thread_block_tile;
-
- protected:
- public:
- template <class ParentCGTy>
- __CG_QUALIFIER__ thread_block_tile(const thread_block_tile<size, ParentCGTy>& g)
- : impl::thread_block_tile_internal<size, void>(g) {}
- };
-
- template <unsigned int size, class ParentCGTy = void> class thread_block_tile;
-
- namespace impl {
- template <unsigned int size, class ParentCGTy> struct tiled_partition_internal;
-
- template <unsigned int size>
- struct tiled_partition_internal<size, thread_block> : public thread_block_tile<size, thread_block> {
- __CG_QUALIFIER__ tiled_partition_internal(const thread_block& g)
- : thread_block_tile<size, thread_block>(g) {}
- };
-
- } // namespace impl
-
- /** \brief User exposed API to partition groups.
- *
- * \details This constructs a templated class derieved from thread_group.
- * The template defines tile size of the new thread group at compile time.
- */
- template <unsigned int size, class ParentCGTy>
- __CG_QUALIFIER__ thread_block_tile<size, ParentCGTy> tiled_partition(const ParentCGTy& g) {
- static_assert(is_valid_tile_size<size>::value,
- "Tiled partition with size > wavefront size. Currently not supported ");
- return impl::tiled_partition_internal<size, ParentCGTy>(g);
- }
- } // namespace cooperative_groups
-
- #endif // __cplusplus
- #endif // HIP_INCLUDE_HIP_AMD_DETAIL_HIP_COOPERATIVE_GROUPS_H
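
For context on what the removed header provided, here is a minimal, illustrative sketch of a HIP kernel using the cooperative-groups API declared in amd_hip_cooperative_groups.h. It is not part of the package diff: it assumes a separate HIP toolchain that supplies <hip/hip_cooperative_groups.h> (this release no longer bundles the header), and the kernel name tile_sum is hypothetical.

// Illustrative only (not from the package): a HIP kernel exercising the
// cooperative-groups API declared in the removed header. Assumes a system
// HIP installation that provides <hip/hip_cooperative_groups.h>.
#include <hip/hip_runtime.h>
#include <hip/hip_cooperative_groups.h>

namespace cg = cooperative_groups;

// Single-block example for brevity: each 32-wide tile computes a partial sum.
__global__ void tile_sum(const float* in, float* out) {
    cg::thread_block block = cg::this_thread_block();
    // Compile-time tiled partition; yields a thread_block_tile<32, thread_block>.
    auto tile = cg::tiled_partition<32>(block);

    float v = in[block.thread_rank()];
    // Tree reduction inside the tile via the tile-level shuffle wrappers.
    for (unsigned int offset = tile.size() / 2; offset > 0; offset /= 2) {
        v += tile.shfl_down(v, offset);
    }
    // Lane 0 of each tile writes its partial sum, indexed by the tile's
    // rank among the tiles partitioned from the block.
    if (tile.thread_rank() == 0) {
        out[tile.meta_group_rank()] = v;
    }
}

With a 256-thread block, tiled_partition<32> produces eight tiles (meta_group_size() == 8), so out receives eight partial sums. The tile size must be a power of two no larger than the wavefront size, which the static_assert on is_valid_tile_size in the removed header enforces.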