cuda-cccl 0.3.0__cp312-cp312-manylinux_2_24_aarch64.whl → 0.3.1__cp312-cp312-manylinux_2_24_aarch64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of cuda-cccl might be problematic. Click here for more details.
- cuda/cccl/cooperative/__init__.py +7 -1
- cuda/cccl/cooperative/experimental/__init__.py +21 -5
- cuda/cccl/headers/include/cub/agent/agent_adjacent_difference.cuh +2 -5
- cuda/cccl/headers/include/cub/agent/agent_batch_memcpy.cuh +2 -5
- cuda/cccl/headers/include/cub/agent/agent_for.cuh +2 -5
- cuda/cccl/headers/include/cub/agent/agent_merge.cuh +23 -21
- cuda/cccl/headers/include/cub/agent/agent_merge_sort.cuh +21 -3
- cuda/cccl/headers/include/cub/agent/agent_radix_sort_downsweep.cuh +2 -5
- cuda/cccl/headers/include/cub/agent/agent_radix_sort_histogram.cuh +2 -5
- cuda/cccl/headers/include/cub/agent/agent_radix_sort_onesweep.cuh +2 -5
- cuda/cccl/headers/include/cub/agent/agent_radix_sort_upsweep.cuh +2 -5
- cuda/cccl/headers/include/cub/agent/agent_rle.cuh +2 -5
- cuda/cccl/headers/include/cub/agent/agent_scan.cuh +5 -1
- cuda/cccl/headers/include/cub/agent/agent_scan_by_key.cuh +2 -5
- cuda/cccl/headers/include/cub/agent/agent_segmented_radix_sort.cuh +2 -5
- cuda/cccl/headers/include/cub/agent/agent_select_if.cuh +2 -5
- cuda/cccl/headers/include/cub/agent/agent_sub_warp_merge_sort.cuh +2 -5
- cuda/cccl/headers/include/cub/agent/agent_three_way_partition.cuh +2 -5
- cuda/cccl/headers/include/cub/agent/agent_unique_by_key.cuh +22 -5
- cuda/cccl/headers/include/cub/block/block_radix_rank.cuh +3 -2
- cuda/cccl/headers/include/cub/block/block_radix_sort.cuh +4 -2
- cuda/cccl/headers/include/cub/detail/device_memory_resource.cuh +1 -0
- cuda/cccl/headers/include/cub/device/device_segmented_reduce.cuh +158 -247
- cuda/cccl/headers/include/cub/device/dispatch/dispatch_merge.cuh +4 -4
- cuda/cccl/headers/include/cub/device/dispatch/dispatch_radix_sort.cuh +2 -11
- cuda/cccl/headers/include/cub/device/dispatch/dispatch_reduce.cuh +8 -26
- cuda/cccl/headers/include/cub/device/dispatch/dispatch_reduce_deterministic.cuh +1 -6
- cuda/cccl/headers/include/cub/device/dispatch/dispatch_reduce_nondeterministic.cuh +0 -1
- cuda/cccl/headers/include/cub/device/dispatch/dispatch_segmented_sort.cuh +2 -3
- cuda/cccl/headers/include/cub/device/dispatch/kernels/reduce.cuh +2 -5
- cuda/cccl/headers/include/cub/device/dispatch/kernels/scan.cuh +2 -5
- cuda/cccl/headers/include/cub/device/dispatch/kernels/segmented_reduce.cuh +2 -5
- cuda/cccl/headers/include/cub/device/dispatch/tuning/tuning_adjacent_difference.cuh +2 -5
- cuda/cccl/headers/include/cub/device/dispatch/tuning/tuning_batch_memcpy.cuh +2 -5
- cuda/cccl/headers/include/cub/device/dispatch/tuning/tuning_for.cuh +2 -5
- cuda/cccl/headers/include/cub/device/dispatch/tuning/tuning_histogram.cuh +2 -5
- cuda/cccl/headers/include/cub/device/dispatch/tuning/tuning_merge.cuh +2 -5
- cuda/cccl/headers/include/cub/device/dispatch/tuning/tuning_merge_sort.cuh +8 -0
- cuda/cccl/headers/include/cub/device/dispatch/tuning/tuning_radix_sort.cuh +2 -5
- cuda/cccl/headers/include/cub/device/dispatch/tuning/tuning_reduce_by_key.cuh +2 -5
- cuda/cccl/headers/include/cub/device/dispatch/tuning/tuning_run_length_encode.cuh +2 -5
- cuda/cccl/headers/include/cub/device/dispatch/tuning/tuning_scan.cuh +2 -5
- cuda/cccl/headers/include/cub/device/dispatch/tuning/tuning_scan_by_key.cuh +2 -5
- cuda/cccl/headers/include/cub/device/dispatch/tuning/tuning_segmented_sort.cuh +2 -5
- cuda/cccl/headers/include/cub/device/dispatch/tuning/tuning_three_way_partition.cuh +2 -5
- cuda/cccl/headers/include/cub/device/dispatch/tuning/tuning_unique_by_key.cuh +10 -0
- cuda/cccl/headers/include/cub/warp/specializations/warp_reduce_shfl.cuh +3 -2
- cuda/cccl/headers/include/cub/warp/specializations/warp_reduce_smem.cuh +3 -2
- cuda/cccl/headers/include/cub/warp/specializations/warp_scan_shfl.cuh +2 -2
- cuda/cccl/headers/include/cuda/__algorithm/common.h +1 -1
- cuda/cccl/headers/include/cuda/__algorithm/copy.h +1 -1
- cuda/cccl/headers/include/cuda/__algorithm/fill.h +1 -1
- cuda/cccl/headers/include/cuda/__device/all_devices.h +46 -143
- cuda/cccl/headers/include/cuda/__device/arch_traits.h +48 -46
- cuda/cccl/headers/include/cuda/__device/attributes.h +171 -121
- cuda/cccl/headers/include/cuda/__device/device_ref.h +30 -42
- cuda/cccl/headers/include/cuda/__device/physical_device.h +120 -91
- cuda/cccl/headers/include/cuda/__driver/driver_api.h +105 -3
- cuda/cccl/headers/include/cuda/__event/event.h +1 -0
- cuda/cccl/headers/include/cuda/__event/timed_event.h +1 -0
- cuda/cccl/headers/include/cuda/__fwd/devices.h +44 -0
- cuda/cccl/headers/include/cuda/__fwd/zip_iterator.h +9 -0
- cuda/cccl/headers/include/cuda/__iterator/zip_common.h +158 -0
- cuda/cccl/headers/include/cuda/__iterator/zip_iterator.h +8 -120
- cuda/cccl/headers/include/cuda/__iterator/zip_transform_iterator.h +593 -0
- cuda/cccl/headers/include/cuda/__runtime/ensure_current_context.h +4 -3
- cuda/cccl/headers/include/cuda/__stream/stream_ref.h +1 -0
- cuda/cccl/headers/include/cuda/__utility/basic_any.h +1 -1
- cuda/cccl/headers/include/cuda/algorithm +1 -1
- cuda/cccl/headers/include/cuda/devices +10 -0
- cuda/cccl/headers/include/cuda/iterator +1 -0
- cuda/cccl/headers/include/cuda/std/__bit/countl.h +8 -1
- cuda/cccl/headers/include/cuda/std/__bit/countr.h +2 -2
- cuda/cccl/headers/include/cuda/std/__bit/reference.h +11 -11
- cuda/cccl/headers/include/cuda/std/__chrono/duration.h +16 -16
- cuda/cccl/headers/include/cuda/std/__chrono/steady_clock.h +5 -5
- cuda/cccl/headers/include/cuda/std/__chrono/system_clock.h +5 -5
- cuda/cccl/headers/include/cuda/std/__floating_point/fp.h +1 -1
- cuda/cccl/headers/include/cuda/std/__tuple_dir/make_tuple_types.h +23 -1
- cuda/cccl/headers/include/cuda/std/__tuple_dir/tuple_like.h +4 -0
- cuda/cccl/headers/include/cuda/std/__tuple_dir/tuple_like_ext.h +4 -0
- cuda/cccl/headers/include/cuda/std/string_view +12 -5
- cuda/cccl/headers/include/cuda/std/version +1 -4
- cuda/cccl/headers/include/thrust/detail/integer_math.h +3 -20
- cuda/cccl/headers/include/thrust/iterator/iterator_traits.h +11 -0
- cuda/cccl/headers/include/thrust/system/cuda/detail/copy.h +33 -0
- cuda/cccl/parallel/experimental/__init__.py +21 -74
- cuda/compute/__init__.py +77 -0
- cuda/{cccl/parallel/experimental → compute}/_bindings_impl.pyx +1 -1
- cuda/{cccl/parallel/experimental → compute}/algorithms/_histogram.py +2 -2
- cuda/{cccl/parallel/experimental → compute}/algorithms/_merge_sort.py +2 -2
- cuda/{cccl/parallel/experimental → compute}/algorithms/_radix_sort.py +3 -3
- cuda/{cccl/parallel/experimental → compute}/algorithms/_reduce.py +2 -2
- cuda/{cccl/parallel/experimental → compute}/algorithms/_scan.py +4 -4
- cuda/{cccl/parallel/experimental → compute}/algorithms/_segmented_reduce.py +2 -2
- cuda/{cccl/parallel/experimental → compute}/algorithms/_three_way_partition.py +2 -2
- cuda/{cccl/parallel/experimental → compute}/algorithms/_transform.py +4 -4
- cuda/{cccl/parallel/experimental → compute}/algorithms/_unique_by_key.py +2 -2
- cuda/{cccl/parallel/experimental → compute}/cu12/_bindings_impl.cpython-312-aarch64-linux-gnu.so +0 -0
- cuda/{cccl/parallel/experimental → compute}/cu12/cccl/libcccl.c.parallel.so +0 -0
- cuda/{cccl/parallel/experimental → compute}/cu13/_bindings_impl.cpython-312-aarch64-linux-gnu.so +0 -0
- cuda/{cccl/parallel/experimental → compute}/cu13/cccl/libcccl.c.parallel.so +0 -0
- cuda/{cccl/parallel/experimental → compute}/iterators/_factories.py +8 -8
- cuda/{cccl/parallel/experimental → compute}/struct.py +2 -2
- cuda/coop/__init__.py +8 -0
- cuda/{cccl/cooperative/experimental → coop}/_nvrtc.py +3 -2
- cuda/{cccl/cooperative/experimental → coop}/_scan_op.py +3 -3
- cuda/{cccl/cooperative/experimental → coop}/_types.py +2 -2
- cuda/{cccl/cooperative/experimental → coop}/_typing.py +1 -1
- cuda/{cccl/cooperative/experimental → coop}/block/__init__.py +6 -6
- cuda/{cccl/cooperative/experimental → coop}/block/_block_exchange.py +4 -4
- cuda/{cccl/cooperative/experimental → coop}/block/_block_load_store.py +6 -6
- cuda/{cccl/cooperative/experimental → coop}/block/_block_merge_sort.py +4 -4
- cuda/{cccl/cooperative/experimental → coop}/block/_block_radix_sort.py +6 -6
- cuda/{cccl/cooperative/experimental → coop}/block/_block_reduce.py +6 -6
- cuda/{cccl/cooperative/experimental → coop}/block/_block_scan.py +7 -7
- cuda/coop/warp/__init__.py +9 -0
- cuda/{cccl/cooperative/experimental → coop}/warp/_warp_merge_sort.py +3 -3
- cuda/{cccl/cooperative/experimental → coop}/warp/_warp_reduce.py +6 -6
- cuda/{cccl/cooperative/experimental → coop}/warp/_warp_scan.py +4 -4
- {cuda_cccl-0.3.0.dist-info → cuda_cccl-0.3.1.dist-info}/METADATA +1 -1
- {cuda_cccl-0.3.0.dist-info → cuda_cccl-0.3.1.dist-info}/RECORD +141 -138
- cuda/cccl/cooperative/experimental/warp/__init__.py +0 -9
- cuda/cccl/headers/include/cub/device/dispatch/dispatch_advance_iterators.cuh +0 -111
- cuda/cccl/parallel/experimental/.gitignore +0 -4
- /cuda/{cccl/parallel/experimental → compute}/_bindings.py +0 -0
- /cuda/{cccl/parallel/experimental → compute}/_bindings.pyi +0 -0
- /cuda/{cccl/parallel/experimental → compute}/_caching.py +0 -0
- /cuda/{cccl/parallel/experimental → compute}/_cccl_interop.py +0 -0
- /cuda/{cccl/parallel/experimental → compute}/_utils/__init__.py +0 -0
- /cuda/{cccl/parallel/experimental → compute}/_utils/protocols.py +0 -0
- /cuda/{cccl/parallel/experimental → compute}/_utils/temp_storage_buffer.py +0 -0
- /cuda/{cccl/parallel/experimental → compute}/algorithms/__init__.py +0 -0
- /cuda/{cccl/parallel/experimental → compute}/cccl/.gitkeep +0 -0
- /cuda/{cccl/parallel/experimental → compute}/iterators/__init__.py +0 -0
- /cuda/{cccl/parallel/experimental → compute}/iterators/_iterators.py +0 -0
- /cuda/{cccl/parallel/experimental → compute}/iterators/_zip_iterator.py +0 -0
- /cuda/{cccl/parallel/experimental → compute}/numba_utils.py +0 -0
- /cuda/{cccl/parallel/experimental → compute}/op.py +0 -0
- /cuda/{cccl/parallel/experimental → compute}/typing.py +0 -0
- /cuda/{cccl/cooperative/experimental → coop}/_caching.py +0 -0
- /cuda/{cccl/cooperative/experimental → coop}/_common.py +0 -0
- {cuda_cccl-0.3.0.dist-info → cuda_cccl-0.3.1.dist-info}/WHEEL +0 -0
- {cuda_cccl-0.3.0.dist-info → cuda_cccl-0.3.1.dist-info}/licenses/LICENSE +0 -0
|
@@ -11,7 +11,7 @@
|
|
|
11
11
|
#ifndef _CUDA___DEVICE_DEVICE_REF_H
|
|
12
12
|
#define _CUDA___DEVICE_DEVICE_REF_H
|
|
13
13
|
|
|
14
|
-
#include <cuda/
|
|
14
|
+
#include <cuda/std/detail/__config>
|
|
15
15
|
|
|
16
16
|
#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
|
|
17
17
|
# pragma GCC system_header
|
|
@@ -22,44 +22,32 @@
|
|
|
22
22
|
#endif // no system header
|
|
23
23
|
|
|
24
24
|
#if _CCCL_HAS_CTK() && !_CCCL_COMPILER(NVRTC)
|
|
25
|
+
|
|
25
26
|
# include <cuda/__driver/driver_api.h>
|
|
27
|
+
# include <cuda/__fwd/devices.h>
|
|
26
28
|
# include <cuda/__runtime/types.h>
|
|
27
|
-
|
|
28
|
-
# include <
|
|
29
|
-
# include <vector>
|
|
29
|
+
# include <cuda/std/span>
|
|
30
|
+
# include <cuda/std/string_view>
|
|
30
31
|
|
|
31
32
|
# include <cuda/std/__cccl/prologue.h>
|
|
32
33
|
|
|
33
34
|
_CCCL_BEGIN_NAMESPACE_CUDA
|
|
34
|
-
class physical_device;
|
|
35
|
-
namespace arch
|
|
36
|
-
{
|
|
37
|
-
struct traits_t;
|
|
38
|
-
} // namespace arch
|
|
39
|
-
|
|
40
|
-
namespace __detail
|
|
41
|
-
{
|
|
42
|
-
template <::cudaDeviceAttr _Attr>
|
|
43
|
-
struct __dev_attr;
|
|
44
|
-
} // namespace __detail
|
|
45
35
|
|
|
46
36
|
//! @brief A non-owning representation of a CUDA device
|
|
47
37
|
class device_ref
|
|
48
38
|
{
|
|
49
|
-
friend class physical_device;
|
|
50
|
-
|
|
51
39
|
int __id_ = 0;
|
|
52
40
|
|
|
53
41
|
public:
|
|
54
42
|
//! @brief Create a `device_ref` object from a native device ordinal.
|
|
55
|
-
/*implicit*/ constexpr device_ref(int __id) noexcept
|
|
43
|
+
/*implicit*/ _CCCL_HOST_API constexpr device_ref(int __id) noexcept
|
|
56
44
|
: __id_(__id)
|
|
57
45
|
{}
|
|
58
46
|
|
|
59
47
|
//! @brief Retrieve the native ordinal of the `device_ref`
|
|
60
48
|
//!
|
|
61
49
|
//! @return int The native device ordinal held by the `device_ref` object
|
|
62
|
-
[[nodiscard]] constexpr int get() const noexcept
|
|
50
|
+
[[nodiscard]] _CCCL_HOST_API constexpr int get() const noexcept
|
|
63
51
|
{
|
|
64
52
|
return __id_;
|
|
65
53
|
}
|
|
@@ -72,7 +60,7 @@ public:
|
|
|
72
60
|
//! @param __lhs The first `device_ref` to compare
|
|
73
61
|
//! @param __rhs The second `device_ref` to compare
|
|
74
62
|
//! @return true if `lhs` and `rhs` refer to the same device ordinal
|
|
75
|
-
[[nodiscard]] friend constexpr bool operator==(device_ref __lhs, device_ref __rhs) noexcept
|
|
63
|
+
[[nodiscard]] friend _CCCL_HOST_API constexpr bool operator==(device_ref __lhs, device_ref __rhs) noexcept
|
|
76
64
|
{
|
|
77
65
|
return __lhs.__id_ == __rhs.__id_;
|
|
78
66
|
}
|
|
@@ -86,7 +74,7 @@ public:
|
|
|
86
74
|
//! @param __lhs The first `device_ref` to compare
|
|
87
75
|
//! @param __rhs The second `device_ref` to compare
|
|
88
76
|
//! @return true if `lhs` and `rhs` refer to different device ordinal
|
|
89
|
-
[[nodiscard]] constexpr
|
|
77
|
+
[[nodiscard]] friend _CCCL_HOST_API constexpr bool operator!=(device_ref __lhs, device_ref __rhs) noexcept
|
|
90
78
|
{
|
|
91
79
|
return __lhs.__id_ != __rhs.__id_;
|
|
92
80
|
}
|
|
@@ -101,38 +89,35 @@ public:
|
|
|
101
89
|
//!
|
|
102
90
|
//! @sa device::attrs
|
|
103
91
|
template <typename _Attr>
|
|
104
|
-
[[nodiscard]] auto attribute(_Attr __attr) const
|
|
92
|
+
[[nodiscard]] _CCCL_HOST_API auto attribute(_Attr __attr) const
|
|
105
93
|
{
|
|
106
94
|
return __attr(*this);
|
|
107
95
|
}
|
|
108
96
|
|
|
109
97
|
//! @overload
|
|
110
98
|
template <::cudaDeviceAttr _Attr>
|
|
111
|
-
[[nodiscard]] auto attribute() const
|
|
99
|
+
[[nodiscard]] _CCCL_HOST_API auto attribute() const
|
|
112
100
|
{
|
|
113
|
-
return attribute(
|
|
101
|
+
return attribute(__dev_attr<_Attr>());
|
|
114
102
|
}
|
|
115
103
|
|
|
116
104
|
//! @brief Retrieve the memory location of this device
|
|
117
105
|
//!
|
|
118
106
|
//! @return The memory location of this device
|
|
119
|
-
[[nodiscard]] operator memory_location() const noexcept
|
|
107
|
+
[[nodiscard]] _CCCL_HOST_API operator memory_location() const noexcept
|
|
120
108
|
{
|
|
121
109
|
return memory_location{::cudaMemLocationTypeDevice, get()};
|
|
122
110
|
}
|
|
123
111
|
|
|
124
|
-
//! @brief
|
|
125
|
-
|
|
126
|
-
//! @return String containing the name of this device.
|
|
127
|
-
[[nodiscard]] ::std::string name() const
|
|
128
|
-
{
|
|
129
|
-
constexpr int __max_name_length = 256;
|
|
130
|
-
::std::string __name(256, 0);
|
|
112
|
+
//! @brief Initializes the primary context of the device.
|
|
113
|
+
_CCCL_HOST_API void init() const; // implemented in <cuda/__device/physical_device.h> to avoid circular dependency
|
|
131
114
|
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
115
|
+
//! @brief Retrieve the name of this device.
|
|
116
|
+
//!
|
|
117
|
+
//! @return String view containing the name of this device.
|
|
118
|
+
[[nodiscard]] _CCCL_HOST_API ::cuda::std::string_view name() const; // implemented in
|
|
119
|
+
// <cuda/__device/physical_device.h> to avoid
|
|
120
|
+
// circular dependency
|
|
136
121
|
|
|
137
122
|
//! @brief Queries if its possible for this device to directly access specified device's memory.
|
|
138
123
|
//!
|
|
@@ -142,7 +127,7 @@ public:
|
|
|
142
127
|
//!
|
|
143
128
|
//! @param __other_dev Device to query the peer access
|
|
144
129
|
//! @return true if its possible for this device to access the specified device's memory
|
|
145
|
-
[[nodiscard]] bool has_peer_access_to(device_ref __other_dev) const
|
|
130
|
+
[[nodiscard]] _CCCL_HOST_API bool has_peer_access_to(device_ref __other_dev) const
|
|
146
131
|
{
|
|
147
132
|
return ::cuda::__driver::__deviceCanAccessPeer(
|
|
148
133
|
::cuda::__driver::__deviceGet(get()), ::cuda::__driver::__deviceGet(__other_dev.get()));
|
|
@@ -154,19 +139,22 @@ public:
|
|
|
154
139
|
//! that are shared by all devices belonging to given architecture.
|
|
155
140
|
//!
|
|
156
141
|
//! @return A reference to `arch_traits_t` object containing architecture traits of this device
|
|
157
|
-
const arch::traits_t& arch_traits() const;
|
|
142
|
+
[[nodiscard]] _CCCL_HOST_API const arch::traits_t& arch_traits() const; // implemented in
|
|
143
|
+
// <cuda/__device/physical_device.h> to avoid
|
|
144
|
+
// circular dependency
|
|
158
145
|
|
|
159
146
|
// TODO this might return some more complex type in the future
|
|
160
147
|
// TODO we might want to include the calling device, depends on what we decide
|
|
161
148
|
// peer access APIs
|
|
162
149
|
|
|
163
|
-
//! @brief Retrieve
|
|
150
|
+
//! @brief Retrieve `device_ref`s that are peers of this device
|
|
164
151
|
//!
|
|
165
|
-
//! The device on which this API is called is not included in the vector
|
|
166
|
-
//! if a full group of peer devices is needed, it needs to be pushed_back separately.
|
|
152
|
+
//! The device on which this API is called is not included in the vector.
|
|
167
153
|
//!
|
|
168
154
|
//! @throws cuda_error if any peer access query fails
|
|
169
|
-
::std::
|
|
155
|
+
[[nodiscard]] _CCCL_HOST_API ::cuda::std::span<const device_ref> peers() const; // implemented in
|
|
156
|
+
// <cuda/__device/physical_device.h>
|
|
157
|
+
// to avoid circular dependency
|
|
170
158
|
};
|
|
171
159
|
|
|
172
160
|
_CCCL_END_NAMESPACE_CUDA
|
|
@@ -11,7 +11,7 @@
|
|
|
11
11
|
#ifndef _CUDA___DEVICE_PHYSICAL_DEVICE_H
|
|
12
12
|
#define _CUDA___DEVICE_PHYSICAL_DEVICE_H
|
|
13
13
|
|
|
14
|
-
#include <cuda/
|
|
14
|
+
#include <cuda/std/detail/__config>
|
|
15
15
|
|
|
16
16
|
#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
|
|
17
17
|
# pragma GCC system_header
|
|
@@ -24,58 +24,71 @@
|
|
|
24
24
|
#if _CCCL_HAS_CTK() && !_CCCL_COMPILER(NVRTC)
|
|
25
25
|
|
|
26
26
|
# include <cuda/__device/arch_traits.h>
|
|
27
|
-
# include <cuda/__device/attributes.h>
|
|
28
27
|
# include <cuda/__device/device_ref.h>
|
|
29
28
|
# include <cuda/__driver/driver_api.h>
|
|
29
|
+
# include <cuda/__fwd/devices.h>
|
|
30
|
+
# include <cuda/std/__cstddef/types.h>
|
|
31
|
+
# include <cuda/std/span>
|
|
32
|
+
# include <cuda/std/string_view>
|
|
30
33
|
|
|
31
34
|
# include <cassert>
|
|
35
|
+
# include <memory>
|
|
32
36
|
# include <mutex>
|
|
37
|
+
# include <vector>
|
|
33
38
|
|
|
34
39
|
# include <cuda/std/__cccl/prologue.h>
|
|
35
40
|
|
|
36
41
|
_CCCL_BEGIN_NAMESPACE_CUDA
|
|
37
|
-
namespace __detail
|
|
38
|
-
{
|
|
39
|
-
//! @brief A proxy object used to in-place construct a `device` object from an
|
|
40
|
-
//! integer ID. Used in __detail/all_devices.cuh.
|
|
41
|
-
struct __emplace_device
|
|
42
|
-
{
|
|
43
|
-
int __id_;
|
|
44
|
-
|
|
45
|
-
[[nodiscard]] operator physical_device() const;
|
|
46
42
|
|
|
47
|
-
|
|
48
|
-
};
|
|
49
|
-
} // namespace __detail
|
|
50
|
-
|
|
51
|
-
//! @brief For a given attribute, type of the attribute value.
|
|
52
|
-
//!
|
|
53
|
-
//! @par Example
|
|
54
|
-
//! @code
|
|
55
|
-
//! using threads_per_block_t = device::attr_result_t<device_attributes::max_threads_per_block>;
|
|
56
|
-
//! static_assert(std::is_same_v<threads_per_block_t, int>);
|
|
57
|
-
//! @endcode
|
|
58
|
-
//!
|
|
59
|
-
//! @sa device_attributes
|
|
60
|
-
template <::cudaDeviceAttr _Attr>
|
|
61
|
-
using device_attribute_result_t = typename __detail::__dev_attr<_Attr>::type;
|
|
43
|
+
[[nodiscard]] inline ::cuda::std::span<__physical_device> __physical_devices();
|
|
62
44
|
|
|
63
45
|
// This is the element type of the the global `devices` array. In the future, we
|
|
64
46
|
// can cache device properties here.
|
|
65
47
|
//
|
|
66
48
|
//! @brief An immovable "owning" representation of a CUDA device.
|
|
67
|
-
class
|
|
49
|
+
class __physical_device
|
|
68
50
|
{
|
|
51
|
+
friend _CCCL_HOST_API inline ::std::unique_ptr<__physical_device[]>
|
|
52
|
+
__make_physical_devices(::cuda::std::size_t __device_count);
|
|
53
|
+
|
|
54
|
+
::CUdevice __device_{};
|
|
55
|
+
|
|
56
|
+
// TODO We should have some of the attributes just return from the arch traits
|
|
57
|
+
::std::once_flag __traits_once_flag_{};
|
|
58
|
+
arch::traits_t __traits_{};
|
|
59
|
+
|
|
60
|
+
::std::once_flag __primary_ctx_once_flag_{};
|
|
61
|
+
::CUcontext __primary_ctx_{};
|
|
62
|
+
|
|
63
|
+
static constexpr ::cuda::std::size_t __max_name_length{256};
|
|
64
|
+
::std::once_flag __name_once_flag_{};
|
|
65
|
+
char __name_[__max_name_length]{};
|
|
66
|
+
::cuda::std::size_t __name_length_{};
|
|
67
|
+
|
|
68
|
+
::std::once_flag __peers_once_flag_{};
|
|
69
|
+
::std::vector<device_ref> __peers_{};
|
|
70
|
+
|
|
69
71
|
public:
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
72
|
+
_CCCL_HIDE_FROM_ABI __physical_device() = default;
|
|
73
|
+
|
|
74
|
+
_CCCL_HOST_API ~__physical_device()
|
|
75
|
+
{
|
|
76
|
+
if (__primary_ctx_ != nullptr)
|
|
77
|
+
{
|
|
78
|
+
[[maybe_unused]] const auto __ignore = ::cuda::__driver::__primaryCtxReleaseNoThrow(__device_);
|
|
79
|
+
}
|
|
80
|
+
}
|
|
81
|
+
|
|
82
|
+
//! @brief Retrieve the primary context for this device.
|
|
83
|
+
//!
|
|
84
|
+
//! @return A reference to the primary context for this device.
|
|
85
|
+
[[nodiscard]] _CCCL_HOST_API ::CUcontext __primary_context()
|
|
86
|
+
{
|
|
87
|
+
::std::call_once(__primary_ctx_once_flag_, [this]() {
|
|
88
|
+
__primary_ctx_ = ::cuda::__driver::__primaryCtxRetain(__device_);
|
|
89
|
+
});
|
|
90
|
+
return __primary_ctx_;
|
|
91
|
+
}
|
|
79
92
|
|
|
80
93
|
//! @brief Retrieve architecture traits of this device.
|
|
81
94
|
//!
|
|
@@ -83,81 +96,97 @@ public:
|
|
|
83
96
|
//! that are shared by all devices belonging to given architecture.
|
|
84
97
|
//!
|
|
85
98
|
//! @return A reference to `arch_traits_t` object containing architecture traits of this device
|
|
86
|
-
const arch::traits_t&
|
|
99
|
+
[[nodiscard]] _CCCL_HOST_API const arch::traits_t& __arch_traits()
|
|
87
100
|
{
|
|
88
|
-
|
|
101
|
+
::std::call_once(__traits_once_flag_, [this]() {
|
|
102
|
+
const auto __id = ::cuda::__driver::__cudevice_to_ordinal(__device_);
|
|
103
|
+
__traits_ = ::cuda::arch::__arch_traits_might_be_unknown(__id, device_attributes::compute_capability(__id));
|
|
104
|
+
});
|
|
105
|
+
return __traits_;
|
|
89
106
|
}
|
|
90
107
|
|
|
91
|
-
|
|
92
|
-
//!
|
|
93
|
-
//! @return A reference to the primary context for this device.
|
|
94
|
-
::CUcontext primary_context() const
|
|
108
|
+
[[nodiscard]] _CCCL_HOST_API ::cuda::std::string_view __name()
|
|
95
109
|
{
|
|
96
|
-
::std::call_once(
|
|
97
|
-
|
|
98
|
-
|
|
110
|
+
::std::call_once(__name_once_flag_, [this]() {
|
|
111
|
+
const auto __id = ::cuda::__driver::__cudevice_to_ordinal(__device_);
|
|
112
|
+
::cuda::__driver::__deviceGetName(__name_, __max_name_length, __id);
|
|
113
|
+
__name_length_ = ::cuda::std::char_traits<char>::length(__name_);
|
|
99
114
|
});
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
return __primary_ctx;
|
|
115
|
+
return ::cuda::std::string_view{__name_, __name_length_};
|
|
103
116
|
}
|
|
104
117
|
|
|
105
|
-
|
|
118
|
+
[[nodiscard]] _CCCL_HOST_API ::cuda::std::span<const device_ref> __peers()
|
|
106
119
|
{
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
::cuda::__driver::
|
|
110
|
-
|
|
120
|
+
::std::call_once(__peers_once_flag_, [this]() {
|
|
121
|
+
const auto __count = static_cast<int>(::cuda::__physical_devices().size());
|
|
122
|
+
const auto __id = ::cuda::__driver::__cudevice_to_ordinal(__device_);
|
|
123
|
+
__peers_.reserve(__count);
|
|
124
|
+
for (int __other_id = 0; __other_id < __count; ++__other_id)
|
|
125
|
+
{
|
|
126
|
+
// Exclude the device this API is called on. The main use case for this API
|
|
127
|
+
// is enable/disable peer access. While enable peer access can be called on
|
|
128
|
+
// device on which memory resides, disable peer access will error-out.
|
|
129
|
+
// Usage of the peer access control is smoother when *this is excluded,
|
|
130
|
+
// while it can be easily added with .push_back() on the vector if a full
|
|
131
|
+
// group of peers is needed (for cases other than peer access control)
|
|
132
|
+
if (__other_id != __id)
|
|
133
|
+
{
|
|
134
|
+
device_ref __dev{__id};
|
|
135
|
+
device_ref __other_dev{__other_id};
|
|
136
|
+
|
|
137
|
+
// While in almost all practical applications peer access should be symmetrical,
|
|
138
|
+
// it is possible to build a system with one directional peer access, check
|
|
139
|
+
// both ways here just to be safe
|
|
140
|
+
if (__dev.has_peer_access_to(__other_dev) && __other_dev.has_peer_access_to(__dev))
|
|
141
|
+
{
|
|
142
|
+
__peers_.push_back(__other_dev);
|
|
143
|
+
}
|
|
144
|
+
}
|
|
145
|
+
}
|
|
146
|
+
});
|
|
147
|
+
return ::cuda::std::span<const device_ref>{__peers_};
|
|
111
148
|
}
|
|
149
|
+
};
|
|
112
150
|
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
// TODO should this be a reference/pointer to the constexpr traits instances?
|
|
125
|
-
// Do we care about lazy init?
|
|
126
|
-
// We should have some of the attributes just return from the arch traits
|
|
127
|
-
arch::traits_t __traits;
|
|
128
|
-
|
|
129
|
-
explicit physical_device(int __id)
|
|
130
|
-
: device_ref(__id)
|
|
131
|
-
, __traits(arch::__arch_traits_might_be_unknown(__id, device_attributes::compute_capability(__id)))
|
|
132
|
-
{}
|
|
151
|
+
[[nodiscard]] _CCCL_HOST_API inline ::std::unique_ptr<__physical_device[]>
|
|
152
|
+
__make_physical_devices(::cuda::std::size_t __device_count)
|
|
153
|
+
{
|
|
154
|
+
::std::unique_ptr<__physical_device[]> __devices{::new __physical_device[__device_count]};
|
|
155
|
+
for (::cuda::std::size_t __i = 0; __i < __device_count; ++__i)
|
|
156
|
+
{
|
|
157
|
+
__devices[__i].__device_ = static_cast<int>(__i);
|
|
158
|
+
}
|
|
159
|
+
return __devices;
|
|
160
|
+
}
|
|
133
161
|
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
162
|
+
[[nodiscard]] inline ::cuda::std::span<__physical_device> __physical_devices()
|
|
163
|
+
{
|
|
164
|
+
static const auto __device_count = static_cast<::cuda::std::size_t>(::cuda::__driver::__deviceGetCount());
|
|
165
|
+
static const auto __devices = ::cuda::__make_physical_devices(__device_count);
|
|
166
|
+
return ::cuda::std::span<__physical_device>{__devices.get(), __device_count};
|
|
167
|
+
}
|
|
139
168
|
|
|
140
|
-
|
|
141
|
-
friend bool operator==(int __lhs, const physical_device& __rhs) = delete;
|
|
169
|
+
// device_ref methods dependent on __physical_device
|
|
142
170
|
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
};
|
|
171
|
+
_CCCL_HOST_API inline void device_ref::init() const
|
|
172
|
+
{
|
|
173
|
+
(void) ::cuda::__physical_devices()[__id_].__primary_context();
|
|
174
|
+
}
|
|
148
175
|
|
|
149
|
-
|
|
176
|
+
[[nodiscard]] _CCCL_HOST_API inline ::cuda::std::string_view device_ref::name() const
|
|
150
177
|
{
|
|
151
|
-
|
|
178
|
+
return ::cuda::__physical_devices()[__id_].__name();
|
|
179
|
+
}
|
|
180
|
+
|
|
181
|
+
[[nodiscard]] _CCCL_HOST_API inline const arch::traits_t& device_ref::arch_traits() const
|
|
152
182
|
{
|
|
153
|
-
return
|
|
183
|
+
return ::cuda::__physical_devices()[__id_].__arch_traits();
|
|
154
184
|
}
|
|
155
185
|
|
|
156
|
-
[[nodiscard]] inline
|
|
186
|
+
[[nodiscard]] _CCCL_HOST_API inline ::cuda::std::span<const device_ref> device_ref::peers() const
|
|
157
187
|
{
|
|
158
|
-
return
|
|
188
|
+
return ::cuda::__physical_devices()[__id_].__peers();
|
|
159
189
|
}
|
|
160
|
-
} // namespace __detail
|
|
161
190
|
|
|
162
191
|
_CCCL_END_NAMESPACE_CUDA
|
|
163
192
|
|
|
@@ -216,11 +216,10 @@ _CCCL_HOST_API inline void __deviceGetName(char* __name_out, int __len, int __or
|
|
|
216
216
|
return __result;
|
|
217
217
|
}
|
|
218
218
|
|
|
219
|
-
_CCCL_HOST_API inline
|
|
219
|
+
[[nodiscard]] _CCCL_HOST_API inline ::cudaError_t __primaryCtxReleaseNoThrow(::CUdevice __dev)
|
|
220
220
|
{
|
|
221
221
|
static auto __driver_fn = _CCCLRT_GET_DRIVER_FUNCTION(cuDevicePrimaryCtxRelease);
|
|
222
|
-
|
|
223
|
-
::cuda::__driver::__call_driver_fn(__driver_fn, "Failed to release context for a device", __dev);
|
|
222
|
+
return static_cast<::cudaError_t>(__driver_fn(__dev));
|
|
224
223
|
}
|
|
225
224
|
|
|
226
225
|
[[nodiscard]] _CCCL_HOST_API inline bool __isPrimaryCtxActive(::CUdevice __dev)
|
|
@@ -325,6 +324,109 @@ _CCCL_HOST_API void __memsetAsync(void* __dst, _Tp __value, size_t __count, ::CU
|
|
|
325
324
|
}
|
|
326
325
|
}
|
|
327
326
|
|
|
327
|
+
_CCCL_HOST_API inline ::cudaError_t __mempoolCreateNoThrow(::CUmemoryPool* __pool, ::CUmemPoolProps* __props)
|
|
328
|
+
{
|
|
329
|
+
static auto __driver_fn = _CCCLRT_GET_DRIVER_FUNCTION(cuMemPoolCreate);
|
|
330
|
+
return static_cast<::cudaError_t>(__driver_fn(__pool, __props));
|
|
331
|
+
}
|
|
332
|
+
|
|
333
|
+
_CCCL_HOST_API inline void __mempoolSetAttribute(::CUmemoryPool __pool, ::CUmemPool_attribute __attr, void* __value)
|
|
334
|
+
{
|
|
335
|
+
static auto __driver_fn = _CCCLRT_GET_DRIVER_FUNCTION(cuMemPoolSetAttribute);
|
|
336
|
+
::cuda::__driver::__call_driver_fn(__driver_fn, "Failed to set attribute for a memory pool", __pool, __attr, __value);
|
|
337
|
+
}
|
|
338
|
+
|
|
339
|
+
_CCCL_HOST_API inline size_t __mempoolGetAttribute(::CUmemoryPool __pool, ::CUmemPool_attribute __attr)
|
|
340
|
+
{
|
|
341
|
+
size_t __value = 0;
|
|
342
|
+
static auto __driver_fn = _CCCLRT_GET_DRIVER_FUNCTION(cuMemPoolGetAttribute);
|
|
343
|
+
::cuda::__driver::__call_driver_fn(__driver_fn, "Failed to get attribute for a memory pool", __pool, __attr, &__value);
|
|
344
|
+
return __value;
|
|
345
|
+
}
|
|
346
|
+
|
|
347
|
+
_CCCL_HOST_API inline void __mempoolDestroy(::CUmemoryPool __pool)
|
|
348
|
+
{
|
|
349
|
+
static auto __driver_fn = _CCCLRT_GET_DRIVER_FUNCTION(cuMemPoolDestroy);
|
|
350
|
+
::cuda::__driver::__call_driver_fn(__driver_fn, "Failed to destroy a memory pool", __pool);
|
|
351
|
+
}
|
|
352
|
+
|
|
353
|
+
_CCCL_HOST_API inline ::CUdeviceptr
|
|
354
|
+
__mallocFromPoolAsync(::cuda::std::size_t __bytes, ::CUmemoryPool __pool, ::CUstream __stream)
|
|
355
|
+
{
|
|
356
|
+
static auto __driver_fn = _CCCLRT_GET_DRIVER_FUNCTION(cuMemAllocFromPoolAsync);
|
|
357
|
+
::CUdeviceptr __result = 0;
|
|
358
|
+
::cuda::__driver::__call_driver_fn(
|
|
359
|
+
__driver_fn, "Failed to allocate memory from a memory pool", &__result, __bytes, __pool, __stream);
|
|
360
|
+
return __result;
|
|
361
|
+
}
|
|
362
|
+
|
|
363
|
+
_CCCL_HOST_API inline void __mempoolTrimTo(::CUmemoryPool __pool, ::cuda::std::size_t __min_bytes_to_keep)
|
|
364
|
+
{
|
|
365
|
+
static auto __driver_fn = _CCCLRT_GET_DRIVER_FUNCTION(cuMemPoolTrimTo);
|
|
366
|
+
::cuda::__driver::__call_driver_fn(__driver_fn, "Failed to trim a memory pool", __pool, __min_bytes_to_keep);
|
|
367
|
+
}
|
|
368
|
+
|
|
369
|
+
_CCCL_HOST_API inline ::cudaError_t __freeAsyncNoThrow(::CUdeviceptr __dptr, ::CUstream __stream)
|
|
370
|
+
{
|
|
371
|
+
static auto __driver_fn = _CCCLRT_GET_DRIVER_FUNCTION(cuMemFreeAsync);
|
|
372
|
+
return static_cast<::cudaError_t>(__driver_fn(__dptr, __stream));
|
|
373
|
+
}
|
|
374
|
+
|
|
375
|
+
//! Applies __count access descriptors to a memory pool; throws on failure.
_CCCL_HOST_API inline void __mempoolSetAccess(::CUmemoryPool __pool, ::CUmemAccessDesc* __descs, ::size_t __count)
{
  static auto __set_access_fn = _CCCLRT_GET_DRIVER_FUNCTION(cuMemPoolSetAccess);
  ::cuda::__driver::__call_driver_fn(
    __set_access_fn, "Failed to set access of a memory pool", __pool, __descs, __count);
}
|
|
380
|
+
|
|
381
|
+
//! Queries the access flags a memory pool grants to the given location.
//! Throws (through __call_driver_fn) if the driver call fails.
_CCCL_HOST_API inline ::CUmemAccess_flags __mempoolGetAccess(::CUmemoryPool __pool, ::CUmemLocation* __location)
{
  static auto __driver_fn = _CCCLRT_GET_DRIVER_FUNCTION(cuMemPoolGetAccess);
  // Value-initialize the out-parameter for consistency with the sibling wrappers
  // (which use `= 0` / `= nullptr`); avoids ever returning an indeterminate value.
  ::CUmemAccess_flags __flags{};
  ::cuda::__driver::__call_driver_fn(__driver_fn, "Failed to get access of a memory pool", &__flags, __pool, __location);
  return __flags;
}
|
|
388
|
+
|
|
389
|
+
# if _CCCL_CTK_AT_LEAST(13, 0)
//! Retrieves the default memory pool for a location / allocation type.
//! Only available with CUDA Toolkit 13.0 or newer (versioned driver lookup).
_CCCL_HOST_API inline ::CUmemoryPool
__getDefaultMemPool(CUmemLocation __location, CUmemAllocationType_enum __allocation_type)
{
  static auto __driver_fn =
    _CCCLRT_GET_DRIVER_FUNCTION_VERSIONED(cuMemGetDefaultMemPool, cuMemGetDefaultMemPool, 13, 0);
  ::CUmemoryPool __pool = nullptr;
  ::cuda::__driver::__call_driver_fn(
    __driver_fn, "Failed to get default memory pool", &__pool, &__location, __allocation_type);
  return __pool;
}
# endif // _CCCL_CTK_AT_LEAST(13, 0)
|
|
401
|
+
|
|
402
|
+
//! Allocates unified (managed) memory accessible from host and device.
//! __flags is forwarded verbatim to cuMemAllocManaged; throws on failure.
_CCCL_HOST_API inline ::CUdeviceptr __mallocManaged(::cuda::std::size_t __bytes, unsigned int __flags)
{
  ::CUdeviceptr __ptr = 0;
  static auto __alloc_fn = _CCCLRT_GET_DRIVER_FUNCTION(cuMemAllocManaged);
  ::cuda::__driver::__call_driver_fn(__alloc_fn, "Failed to allocate managed memory", &__ptr, __bytes, __flags);
  return __ptr;
}
|
|
409
|
+
|
|
410
|
+
//! Allocates page-locked (pinned) host memory; throws on driver failure.
_CCCL_HOST_API inline void* __mallocHost(::cuda::std::size_t __bytes)
{
  void* __ptr = nullptr;
  static auto __alloc_fn = _CCCLRT_GET_DRIVER_FUNCTION(cuMemAllocHost);
  ::cuda::__driver::__call_driver_fn(__alloc_fn, "Failed to allocate host memory", &__ptr, __bytes);
  return __ptr;
}
|
|
417
|
+
|
|
418
|
+
//! Frees device memory without throwing; the driver status is returned as
//! ::cudaError_t so destructors and cleanup paths can use it safely.
_CCCL_HOST_API inline ::cudaError_t __freeNoThrow(::CUdeviceptr __dptr)
{
  static auto __free_fn = _CCCLRT_GET_DRIVER_FUNCTION(cuMemFree);
  return static_cast<::cudaError_t>(__free_fn(__dptr));
}
|
|
423
|
+
|
|
424
|
+
//! Frees pinned host memory without throwing; returns the driver status
//! converted to ::cudaError_t.
_CCCL_HOST_API inline ::cudaError_t __freeHostNoThrow(void* __dptr)
{
  static auto __free_host_fn = _CCCLRT_GET_DRIVER_FUNCTION(cuMemFreeHost);
  return static_cast<::cudaError_t>(__free_host_fn(__dptr));
}
|
|
429
|
+
|
|
328
430
|
// Unified Addressing
|
|
329
431
|
|
|
330
432
|
// TODO: we don't want to have these functions here, refactoring expected
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
//===----------------------------------------------------------------------===//
//
// Part of libcu++, the C++ Standard Library for your entire system,
// under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES.
//
//===----------------------------------------------------------------------===//

// Forward declarations for the device-handling facilities of namespace cuda,
// so other headers can name these types without including their definitions.
#ifndef _CUDA___FWD_DEVICES_H
#define _CUDA___FWD_DEVICES_H

#include <cuda/std/detail/__config>

#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
#  pragma GCC system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
#  pragma clang system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
#  pragma system_header
#endif // no system header

#include <cuda/std/__fwd/span.h>

#include <cuda/std/__cccl/prologue.h>

_CCCL_BEGIN_NAMESPACE_CUDA

// Forward declarations only — definitions live in the corresponding headers.
class __physical_device;
class device_ref;
// Per-attribute helper, parameterized by the runtime device attribute enum.
template <::cudaDeviceAttr _Attr>
struct __dev_attr;

namespace arch
{
struct traits_t;
} // namespace arch

_CCCL_END_NAMESPACE_CUDA

#include <cuda/std/__cccl/epilogue.h>

#endif // _CUDA___FWD_DEVICES_H
|
|
@@ -42,6 +42,15 @@ inline constexpr bool __is_zip_function = false;
|
|
|
42
42
|
template <class _Fn>
|
|
43
43
|
inline constexpr bool __is_zip_function<zip_function<_Fn>> = true;
|
|
44
44
|
|
|
45
|
+
// Forward declaration of zip_transform_iterator so the detection trait below
// can name it without pulling in the full iterator definition.
template <class _Fn, class... _Iterators>
class zip_transform_iterator;

// Detection trait: false for arbitrary types, ...
template <class>
inline constexpr bool __is_zip_transform_iterator = false;

// ... true only for specializations of zip_transform_iterator.
template <class _Fn, class... _Iterators>
inline constexpr bool __is_zip_transform_iterator<zip_transform_iterator<_Fn, _Iterators...>> = true;
|
|
53
|
+
|
|
45
54
|
_CCCL_END_NAMESPACE_CUDA
|
|
46
55
|
|
|
47
56
|
#include <cuda/std/__cccl/epilogue.h>
|