numba-cuda 0.18.0__py3-none-any.whl → 0.19.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of numba-cuda might be problematic; see the registry's advisory page for more details.

Files changed (90)
  1. numba_cuda/VERSION +1 -1
  2. numba_cuda/numba/cuda/__init__.py +1 -1
  3. numba_cuda/numba/cuda/_internal/cuda_bf16.py +2 -2
  4. numba_cuda/numba/cuda/_internal/cuda_fp16.py +1 -1
  5. numba_cuda/numba/cuda/api.py +2 -7
  6. numba_cuda/numba/cuda/compiler.py +7 -4
  7. numba_cuda/numba/cuda/core/interpreter.py +3592 -0
  8. numba_cuda/numba/cuda/core/ir_utils.py +2645 -0
  9. numba_cuda/numba/cuda/core/sigutils.py +55 -0
  10. numba_cuda/numba/cuda/cuda_paths.py +9 -17
  11. numba_cuda/numba/cuda/cudadecl.py +1 -1
  12. numba_cuda/numba/cuda/cudadrv/driver.py +4 -19
  13. numba_cuda/numba/cuda/cudadrv/libs.py +1 -2
  14. numba_cuda/numba/cuda/cudadrv/nvrtc.py +44 -44
  15. numba_cuda/numba/cuda/cudadrv/nvvm.py +3 -18
  16. numba_cuda/numba/cuda/cudadrv/runtime.py +12 -1
  17. numba_cuda/numba/cuda/cudamath.py +1 -1
  18. numba_cuda/numba/cuda/decorators.py +4 -3
  19. numba_cuda/numba/cuda/deviceufunc.py +2 -1
  20. numba_cuda/numba/cuda/dispatcher.py +5 -3
  21. numba_cuda/numba/cuda/extending.py +1 -1
  22. numba_cuda/numba/cuda/itanium_mangler.py +211 -0
  23. numba_cuda/numba/cuda/libdevicedecl.py +1 -1
  24. numba_cuda/numba/cuda/libdevicefuncs.py +1 -1
  25. numba_cuda/numba/cuda/lowering.py +1 -1
  26. numba_cuda/numba/cuda/simulator/api.py +1 -1
  27. numba_cuda/numba/cuda/simulator/cudadrv/driver.py +0 -7
  28. numba_cuda/numba/cuda/target.py +1 -2
  29. numba_cuda/numba/cuda/testing.py +4 -6
  30. numba_cuda/numba/cuda/tests/core/test_itanium_mangler.py +80 -0
  31. numba_cuda/numba/cuda/tests/cudadrv/test_context_stack.py +10 -4
  32. numba_cuda/numba/cuda/tests/cudadrv/test_cuda_ndarray.py +1 -1
  33. numba_cuda/numba/cuda/tests/cudadrv/test_deallocations.py +1 -1
  34. numba_cuda/numba/cuda/tests/cudadrv/test_detect.py +1 -1
  35. numba_cuda/numba/cuda/tests/cudadrv/test_emm_plugins.py +1 -1
  36. numba_cuda/numba/cuda/tests/cudadrv/test_linker.py +1 -1
  37. numba_cuda/numba/cuda/tests/cudadrv/test_managed_alloc.py +1 -1
  38. numba_cuda/numba/cuda/tests/cudadrv/test_mvc.py +1 -1
  39. numba_cuda/numba/cuda/tests/cudadrv/test_nvrtc.py +4 -6
  40. numba_cuda/numba/cuda/tests/cudadrv/test_nvvm_driver.py +0 -4
  41. numba_cuda/numba/cuda/tests/cudadrv/test_ptds.py +1 -1
  42. numba_cuda/numba/cuda/tests/cudapy/test_bfloat16.py +1 -3
  43. numba_cuda/numba/cuda/tests/cudapy/test_bfloat16_bindings.py +1 -3
  44. numba_cuda/numba/cuda/tests/cudapy/test_caching.py +146 -3
  45. numba_cuda/numba/cuda/tests/cudapy/test_cffi.py +1 -1
  46. numba_cuda/numba/cuda/tests/cudapy/test_compiler.py +15 -4
  47. numba_cuda/numba/cuda/tests/cudapy/test_cuda_array_interface.py +1 -1
  48. numba_cuda/numba/cuda/tests/cudapy/test_cuda_jit_no_types.py +1 -1
  49. numba_cuda/numba/cuda/tests/cudapy/test_debug.py +1 -1
  50. numba_cuda/numba/cuda/tests/cudapy/test_debuginfo.py +1 -284
  51. numba_cuda/numba/cuda/tests/cudapy/test_debuginfo_types.py +473 -0
  52. numba_cuda/numba/cuda/tests/cudapy/test_device_func.py +1 -1
  53. numba_cuda/numba/cuda/tests/cudapy/test_errors.py +1 -1
  54. numba_cuda/numba/cuda/tests/cudapy/test_extending.py +1 -6
  55. numba_cuda/numba/cuda/tests/cudapy/test_gufunc.py +1 -1
  56. numba_cuda/numba/cuda/tests/cudapy/test_inspect.py +14 -0
  57. numba_cuda/numba/cuda/tests/cudapy/test_ipc.py +1 -1
  58. numba_cuda/numba/cuda/tests/cudapy/test_ir_utils.py +295 -0
  59. numba_cuda/numba/cuda/tests/cudapy/test_lineinfo.py +1 -1
  60. numba_cuda/numba/cuda/tests/cudapy/test_operator.py +1 -1
  61. numba_cuda/numba/cuda/tests/cudapy/test_ufuncs.py +1 -1
  62. numba_cuda/numba/cuda/tests/cudapy/test_warning.py +5 -1
  63. numba_cuda/numba/cuda/tests/doc_examples/test_cpointer.py +1 -1
  64. numba_cuda/numba/cuda/tests/doc_examples/test_cpu_gpu_compat.py +1 -1
  65. numba_cuda/numba/cuda/tests/doc_examples/test_ffi.py +1 -1
  66. numba_cuda/numba/cuda/tests/doc_examples/test_laplace.py +1 -1
  67. numba_cuda/numba/cuda/tests/doc_examples/test_matmul.py +1 -1
  68. numba_cuda/numba/cuda/tests/doc_examples/test_montecarlo.py +1 -1
  69. numba_cuda/numba/cuda/tests/doc_examples/test_reduction.py +1 -1
  70. numba_cuda/numba/cuda/tests/doc_examples/test_sessionize.py +1 -1
  71. numba_cuda/numba/cuda/tests/doc_examples/test_ufunc.py +1 -1
  72. numba_cuda/numba/cuda/tests/doc_examples/test_vecadd.py +1 -1
  73. numba_cuda/numba/cuda/tests/nocuda/test_import.py +1 -1
  74. numba_cuda/numba/cuda/tests/nrt/test_nrt.py +2 -2
  75. numba_cuda/numba/cuda/tests/nrt/test_nrt_refct.py +1 -1
  76. numba_cuda/numba/cuda/tests/support.py +752 -0
  77. numba_cuda/numba/cuda/tests/test_binary_generation/Makefile +3 -3
  78. numba_cuda/numba/cuda/tests/test_binary_generation/generate_raw_ltoir.py +4 -1
  79. numba_cuda/numba/cuda/typing/__init__.py +8 -0
  80. numba_cuda/numba/cuda/typing/templates.py +1453 -0
  81. numba_cuda/numba/cuda/vector_types.py +3 -3
  82. {numba_cuda-0.18.0.dist-info → numba_cuda-0.19.0.dist-info}/METADATA +21 -28
  83. {numba_cuda-0.18.0.dist-info → numba_cuda-0.19.0.dist-info}/RECORD +86 -81
  84. numba_cuda/numba/cuda/include/11/cuda_bf16.h +0 -3749
  85. numba_cuda/numba/cuda/include/11/cuda_bf16.hpp +0 -2683
  86. numba_cuda/numba/cuda/include/11/cuda_fp16.h +0 -3794
  87. numba_cuda/numba/cuda/include/11/cuda_fp16.hpp +0 -2614
  88. {numba_cuda-0.18.0.dist-info → numba_cuda-0.19.0.dist-info}/WHEEL +0 -0
  89. {numba_cuda-0.18.0.dist-info → numba_cuda-0.19.0.dist-info}/licenses/LICENSE +0 -0
  90. {numba_cuda-0.18.0.dist-info → numba_cuda-0.19.0.dist-info}/top_level.txt +0 -0
@@ -1,2614 +0,0 @@
1
- /*
2
- * Copyright 1993-2021 NVIDIA Corporation. All rights reserved.
3
- *
4
- * NOTICE TO LICENSEE:
5
- *
6
- * This source code and/or documentation ("Licensed Deliverables") are
7
- * subject to NVIDIA intellectual property rights under U.S. and
8
- * international Copyright laws.
9
- *
10
- * These Licensed Deliverables contained herein is PROPRIETARY and
11
- * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
- * conditions of a form of NVIDIA software license agreement by and
13
- * between NVIDIA and Licensee ("License Agreement") or electronically
14
- * accepted by Licensee. Notwithstanding any terms or conditions to
15
- * the contrary in the License Agreement, reproduction or disclosure
16
- * of the Licensed Deliverables to any third party without the express
17
- * written consent of NVIDIA is prohibited.
18
- *
19
- * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
- * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
- * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
- * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
- * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
- * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
- * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
- * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
- * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
- * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
- * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
- * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
- * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
- * OF THESE LICENSED DELIVERABLES.
33
- *
34
- * U.S. Government End Users. These Licensed Deliverables are a
35
- * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
- * 1995), consisting of "commercial computer software" and "commercial
37
- * computer software documentation" as such terms are used in 48
38
- * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
- * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
- * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
- * U.S. Government End Users acquire the Licensed Deliverables with
42
- * only those rights set forth herein.
43
- *
44
- * Any use of the Licensed Deliverables in individual and commercial
45
- * software must include, in the user documentation and internal
46
- * comments to the code, the above Disclaimer and U.S. Government End
47
- * Users Notice.
48
- */
49
-
50
- #if !defined(__CUDA_FP16_HPP__)
51
- #define __CUDA_FP16_HPP__
52
-
53
- #if !defined(__CUDA_FP16_H__)
54
- #error "Do not include this file directly. Instead, include cuda_fp16.h."
55
- #endif
56
-
57
- #if !defined(_MSC_VER) && __cplusplus >= 201103L
58
- # define __CPP_VERSION_AT_LEAST_11_FP16
59
- #elif _MSC_FULL_VER >= 190024210 && _MSVC_LANG >= 201103L
60
- # define __CPP_VERSION_AT_LEAST_11_FP16
61
- #endif
62
-
63
- /* C++11 header for std::move.
64
- * In RTC mode, std::move is provided implicitly; don't include the header
65
- */
66
- #if defined(__CPP_VERSION_AT_LEAST_11_FP16) && !defined(__CUDACC_RTC__)
67
- #include <utility>
68
- #endif /* __cplusplus >= 201103L && !defined(__CUDACC_RTC__) */
69
-
70
- /* C++ header for std::memcpy (used for type punning in host-side implementations).
71
- * When compiling as a CUDA source file memcpy is provided implicitly.
72
- * !defined(__CUDACC__) implies !defined(__CUDACC_RTC__).
73
- */
74
- #if defined(__cplusplus) && !defined(__CUDACC__)
75
- #include <cstring>
76
- #endif /* defined(__cplusplus) && !defined(__CUDACC__) */
77
-
78
-
79
- /* Set up function decorations */
80
- #if defined(__CUDACC__)
81
- #define __CUDA_FP16_DECL__ static __device__ __inline__
82
- #define __CUDA_HOSTDEVICE_FP16_DECL__ static __host__ __device__ __inline__
83
- #define __VECTOR_FUNCTIONS_DECL__ static __inline__ __host__ __device__
84
- #define __CUDA_HOSTDEVICE__ __host__ __device__
85
- #else /* !defined(__CUDACC__) */
86
- #if defined(__GNUC__)
87
- #define __CUDA_HOSTDEVICE_FP16_DECL__ static __attribute__ ((unused))
88
- #else
89
- #define __CUDA_HOSTDEVICE_FP16_DECL__ static
90
- #endif /* defined(__GNUC__) */
91
- #define __CUDA_HOSTDEVICE__
92
- #endif /* defined(__CUDACC_) */
93
-
94
- /* Set up structure-alignment attribute */
95
- #if defined(__CUDACC__)
96
- #define __CUDA_ALIGN__(align) __align__(align)
97
- #else
98
- /* Define alignment macro based on compiler type (cannot assume C11 "_Alignas" is available) */
99
- #if __cplusplus >= 201103L
100
- #define __CUDA_ALIGN__(n) alignas(n) /* C++11 kindly gives us a keyword for this */
101
- #else /* !defined(__CPP_VERSION_AT_LEAST_11_FP16)*/
102
- #if defined(__GNUC__)
103
- #define __CUDA_ALIGN__(n) __attribute__ ((aligned(n)))
104
- #elif defined(_MSC_VER)
105
- #define __CUDA_ALIGN__(n) __declspec(align(n))
106
- #else
107
- #define __CUDA_ALIGN__(n)
108
- #endif /* defined(__GNUC__) */
109
- #endif /* defined(__CPP_VERSION_AT_LEAST_11_FP16) */
110
- #endif /* defined(__CUDACC__) */
111
-
112
- /* Macros to allow half & half2 to be used by inline assembly */
113
- #define __HALF_TO_US(var) *(reinterpret_cast<unsigned short *>(&(var)))
114
- #define __HALF_TO_CUS(var) *(reinterpret_cast<const unsigned short *>(&(var)))
115
- #define __HALF2_TO_UI(var) *(reinterpret_cast<unsigned int *>(&(var)))
116
- #define __HALF2_TO_CUI(var) *(reinterpret_cast<const unsigned int *>(&(var)))
117
-
118
- /* Macros for half & half2 binary arithmetic */
119
- #define __BINARY_OP_HALF_MACRO(name) /* do */ {\
120
- __half val; \
121
- asm( "{" __CUDA_FP16_STRINGIFY(name) ".f16 %0,%1,%2;\n}" \
122
- :"=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)),"h"(__HALF_TO_CUS(b))); \
123
- return val; \
124
- } /* while(0) */
125
- #define __BINARY_OP_HALF2_MACRO(name) /* do */ {\
126
- __half2 val; \
127
- asm( "{" __CUDA_FP16_STRINGIFY(name) ".f16x2 %0,%1,%2;\n}" \
128
- :"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)),"r"(__HALF2_TO_CUI(b))); \
129
- return val; \
130
- } /* while(0) */
131
- #define __TERNARY_OP_HALF_MACRO(name) /* do */ {\
132
- __half val; \
133
- asm( "{" __CUDA_FP16_STRINGIFY(name) ".f16 %0,%1,%2,%3;\n}" \
134
- :"=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)),"h"(__HALF_TO_CUS(b)),"h"(__HALF_TO_CUS(c))); \
135
- return val; \
136
- } /* while(0) */
137
- #define __TERNARY_OP_HALF2_MACRO(name) /* do */ {\
138
- __half2 val; \
139
- asm( "{" __CUDA_FP16_STRINGIFY(name) ".f16x2 %0,%1,%2,%3;\n}" \
140
- :"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)),"r"(__HALF2_TO_CUI(b)),"r"(__HALF2_TO_CUI(c))); \
141
- return val; \
142
- } /* while(0) */
143
-
144
- /**
145
- * Types which allow static initialization of "half" and "half2" until
146
- * these become an actual builtin. Note this initialization is as a
147
- * bitfield representation of "half", and not a conversion from short->half.
148
- * Such a representation will be deprecated in a future version of CUDA.
149
- * (Note these are visible to non-nvcc compilers, including C-only compilation)
150
- */
151
- typedef struct __CUDA_ALIGN__(2) {
152
- unsigned short x;
153
- } __half_raw;
154
-
155
- typedef struct __CUDA_ALIGN__(4) {
156
- unsigned short x;
157
- unsigned short y;
158
- } __half2_raw;
159
-
160
- /* All other definitions in this file are only visible to C++ compilers */
161
- #if defined(__cplusplus)
162
-
163
- /* Hide GCC member initialization list warnings because of host/device in-function init requirement */
164
- #if defined(__GNUC__)
165
- #if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
166
- #pragma GCC diagnostic push
167
- #pragma GCC diagnostic ignored "-Wstrict-aliasing"
168
- #pragma GCC diagnostic ignored "-Weffc++"
169
- #endif /* __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) */
170
- #endif /* defined(__GNUC__) */
171
-
172
- /* class' : multiple assignment operators specified
173
- The class has multiple assignment operators of a single type. This warning is informational */
174
- #if defined(_MSC_VER) && _MSC_VER >= 1500
175
- #pragma warning( push )
176
- #pragma warning( disable:4522 )
177
- #endif /* defined(__GNUC__) */
178
-
179
- struct __CUDA_ALIGN__(2) __half {
180
- protected:
181
- unsigned short __x;
182
-
183
- public:
184
- #if defined(__CPP_VERSION_AT_LEAST_11_FP16)
185
- __half() = default;
186
- #else
187
- __CUDA_HOSTDEVICE__ __half() { }
188
- #endif /* defined(__CPP_VERSION_AT_LEAST_11_FP16) */
189
-
190
- /* Convert to/from __half_raw */
191
- __CUDA_HOSTDEVICE__ __half(const __half_raw &hr) : __x(hr.x) { }
192
- __CUDA_HOSTDEVICE__ __half &operator=(const __half_raw &hr) { __x = hr.x; return *this; }
193
- __CUDA_HOSTDEVICE__ volatile __half &operator=(const __half_raw &hr) volatile { __x = hr.x; return *this; }
194
- __CUDA_HOSTDEVICE__ volatile __half &operator=(const volatile __half_raw &hr) volatile { __x = hr.x; return *this; }
195
- __CUDA_HOSTDEVICE__ operator __half_raw() const { __half_raw ret; ret.x = __x; return ret; }
196
- __CUDA_HOSTDEVICE__ operator __half_raw() const volatile { __half_raw ret; ret.x = __x; return ret; }
197
-
198
- #if !defined(__CUDA_NO_HALF_CONVERSIONS__)
199
-
200
- /* Construct from float/double */
201
- __CUDA_HOSTDEVICE__ __half(const float f) { __x = __float2half(f).__x; }
202
- __CUDA_HOSTDEVICE__ __half(const double f) { __x = __double2half(f).__x; }
203
-
204
- __CUDA_HOSTDEVICE__ operator float() const { return __half2float(*this); }
205
- __CUDA_HOSTDEVICE__ __half &operator=(const float f) { __x = __float2half(f).__x; return *this; }
206
-
207
- /* We omit "cast to double" operator, so as to not be ambiguous about up-cast */
208
- __CUDA_HOSTDEVICE__ __half &operator=(const double f) { __x = __double2half(f).__x; return *this; }
209
-
210
- /* Member functions only available to nvcc compilation so far */
211
- #if defined(__CUDACC__)
212
- /* Allow automatic construction from types supported natively in hardware */
213
- /* Note we do avoid constructor init-list because of special host/device compilation rules */
214
- __CUDA_HOSTDEVICE__ __half(const short val) { __x = __short2half_rn(val).__x; }
215
- __CUDA_HOSTDEVICE__ __half(const unsigned short val) { __x = __ushort2half_rn(val).__x; }
216
- __CUDA_HOSTDEVICE__ __half(const int val) { __x = __int2half_rn(val).__x; }
217
- __CUDA_HOSTDEVICE__ __half(const unsigned int val) { __x = __uint2half_rn(val).__x; }
218
- __CUDA_HOSTDEVICE__ __half(const long long val) { __x = __ll2half_rn(val).__x; }
219
- __CUDA_HOSTDEVICE__ __half(const unsigned long long val) { __x = __ull2half_rn(val).__x; }
220
-
221
- /* Allow automatic casts to supported builtin types, matching all that are permitted with float */
222
- __CUDA_HOSTDEVICE__ operator short() const { return __half2short_rz(*this); }
223
- __CUDA_HOSTDEVICE__ __half &operator=(const short val) { __x = __short2half_rn(val).__x; return *this; }
224
-
225
- __CUDA_HOSTDEVICE__ operator unsigned short() const { return __half2ushort_rz(*this); }
226
- __CUDA_HOSTDEVICE__ __half &operator=(const unsigned short val) { __x = __ushort2half_rn(val).__x; return *this; }
227
-
228
- __CUDA_HOSTDEVICE__ operator int() const { return __half2int_rz(*this); }
229
- __CUDA_HOSTDEVICE__ __half &operator=(const int val) { __x = __int2half_rn(val).__x; return *this; }
230
-
231
- __CUDA_HOSTDEVICE__ operator unsigned int() const { return __half2uint_rz(*this); }
232
- __CUDA_HOSTDEVICE__ __half &operator=(const unsigned int val) { __x = __uint2half_rn(val).__x; return *this; }
233
-
234
- __CUDA_HOSTDEVICE__ operator long long() const { return __half2ll_rz(*this); }
235
- __CUDA_HOSTDEVICE__ __half &operator=(const long long val) { __x = __ll2half_rn(val).__x; return *this; }
236
-
237
- __CUDA_HOSTDEVICE__ operator unsigned long long() const { return __half2ull_rz(*this); }
238
- __CUDA_HOSTDEVICE__ __half &operator=(const unsigned long long val) { __x = __ull2half_rn(val).__x; return *this; }
239
-
240
- /* Boolean conversion - note both 0 and -0 must return false */
241
- __CUDA_HOSTDEVICE__ operator bool() const { return (__x & 0x7FFFU) != 0U; }
242
- #endif /* defined(__CUDACC__) */
243
- #endif /* !defined(__CUDA_NO_HALF_CONVERSIONS__) */
244
- };
245
-
246
- /* Global-space operator functions are only available to nvcc compilation */
247
- #if defined(__CUDACC__)
248
-
249
- /* Arithmetic FP16 operations only supported on arch >= 5.3 */
250
- #if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
251
- #if !defined(__CUDA_NO_HALF_OPERATORS__)
252
- /* Some basic arithmetic operations expected of a builtin */
253
- __device__ __forceinline__ __half operator+(const __half &lh, const __half &rh) { return __hadd(lh, rh); }
254
- __device__ __forceinline__ __half operator-(const __half &lh, const __half &rh) { return __hsub(lh, rh); }
255
- __device__ __forceinline__ __half operator*(const __half &lh, const __half &rh) { return __hmul(lh, rh); }
256
- __device__ __forceinline__ __half operator/(const __half &lh, const __half &rh) { return __hdiv(lh, rh); }
257
-
258
- __device__ __forceinline__ __half &operator+=(__half &lh, const __half &rh) { lh = __hadd(lh, rh); return lh; }
259
- __device__ __forceinline__ __half &operator-=(__half &lh, const __half &rh) { lh = __hsub(lh, rh); return lh; }
260
- __device__ __forceinline__ __half &operator*=(__half &lh, const __half &rh) { lh = __hmul(lh, rh); return lh; }
261
- __device__ __forceinline__ __half &operator/=(__half &lh, const __half &rh) { lh = __hdiv(lh, rh); return lh; }
262
-
263
- /* Note for increment and decrement we use the raw value 0x3C00U equating to half(1.0F), to avoid the extra conversion */
264
- __device__ __forceinline__ __half &operator++(__half &h) { __half_raw one; one.x = 0x3C00U; h += one; return h; }
265
- __device__ __forceinline__ __half &operator--(__half &h) { __half_raw one; one.x = 0x3C00U; h -= one; return h; }
266
- __device__ __forceinline__ __half operator++(__half &h, const int ignored)
267
- {
268
- // ignored on purpose. Parameter only needed to distinguish the function declaration from other types of operators.
269
- static_cast<void>(ignored);
270
-
271
- const __half ret = h;
272
- __half_raw one;
273
- one.x = 0x3C00U;
274
- h += one;
275
- return ret;
276
- }
277
- __device__ __forceinline__ __half operator--(__half &h, const int ignored)
278
- {
279
- // ignored on purpose. Parameter only needed to distinguish the function declaration from other types of operators.
280
- static_cast<void>(ignored);
281
-
282
- const __half ret = h;
283
- __half_raw one;
284
- one.x = 0x3C00U;
285
- h -= one;
286
- return ret;
287
- }
288
-
289
- /* Unary plus and inverse operators */
290
- __device__ __forceinline__ __half operator+(const __half &h) { return h; }
291
- __device__ __forceinline__ __half operator-(const __half &h) { return __hneg(h); }
292
-
293
- /* Some basic comparison operations to make it look like a builtin */
294
- __device__ __forceinline__ bool operator==(const __half &lh, const __half &rh) { return __heq(lh, rh); }
295
- __device__ __forceinline__ bool operator!=(const __half &lh, const __half &rh) { return __hneu(lh, rh); }
296
- __device__ __forceinline__ bool operator> (const __half &lh, const __half &rh) { return __hgt(lh, rh); }
297
- __device__ __forceinline__ bool operator< (const __half &lh, const __half &rh) { return __hlt(lh, rh); }
298
- __device__ __forceinline__ bool operator>=(const __half &lh, const __half &rh) { return __hge(lh, rh); }
299
- __device__ __forceinline__ bool operator<=(const __half &lh, const __half &rh) { return __hle(lh, rh); }
300
- #endif /* !defined(__CUDA_NO_HALF_OPERATORS__) */
301
- #endif /* !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) */
302
- #endif /* defined(__CUDACC__) */
303
-
304
- /* __half2 is visible to non-nvcc host compilers */
305
- struct __CUDA_ALIGN__(4) __half2 {
306
- __half x;
307
- __half y;
308
-
309
- // All construct/copy/assign/move
310
- public:
311
- #if defined(__CPP_VERSION_AT_LEAST_11_FP16)
312
- __half2() = default;
313
- __CUDA_HOSTDEVICE__ __half2(const __half2 &&src) { __HALF2_TO_UI(*this) = std::move(__HALF2_TO_CUI(src)); }
314
- __CUDA_HOSTDEVICE__ __half2 &operator=(const __half2 &&src) { __HALF2_TO_UI(*this) = std::move(__HALF2_TO_CUI(src)); return *this; }
315
- #else
316
- __CUDA_HOSTDEVICE__ __half2() { }
317
- #endif /* defined(__CPP_VERSION_AT_LEAST_11_FP16) */
318
- __CUDA_HOSTDEVICE__ __half2(const __half &a, const __half &b) : x(a), y(b) { }
319
- __CUDA_HOSTDEVICE__ __half2(const __half2 &src) { __HALF2_TO_UI(*this) = __HALF2_TO_CUI(src); }
320
- __CUDA_HOSTDEVICE__ __half2 &operator=(const __half2 &src) { __HALF2_TO_UI(*this) = __HALF2_TO_CUI(src); return *this; }
321
-
322
- /* Convert to/from __half2_raw */
323
- __CUDA_HOSTDEVICE__ __half2(const __half2_raw &h2r ) { __HALF2_TO_UI(*this) = __HALF2_TO_CUI(h2r); }
324
- __CUDA_HOSTDEVICE__ __half2 &operator=(const __half2_raw &h2r) { __HALF2_TO_UI(*this) = __HALF2_TO_CUI(h2r); return *this; }
325
- __CUDA_HOSTDEVICE__ operator __half2_raw() const { __half2_raw ret; ret.x = 0U; ret.y = 0U; __HALF2_TO_UI(ret) = __HALF2_TO_CUI(*this); return ret; }
326
- };
327
-
328
- /* Global-space operator functions are only available to nvcc compilation */
329
- #if defined(__CUDACC__)
330
-
331
- /* Arithmetic FP16x2 operations only supported on arch >= 5.3 */
332
- #if (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)) && !defined(__CUDA_NO_HALF2_OPERATORS__)
333
-
334
- __device__ __forceinline__ __half2 operator+(const __half2 &lh, const __half2 &rh) { return __hadd2(lh, rh); }
335
- __device__ __forceinline__ __half2 operator-(const __half2 &lh, const __half2 &rh) { return __hsub2(lh, rh); }
336
- __device__ __forceinline__ __half2 operator*(const __half2 &lh, const __half2 &rh) { return __hmul2(lh, rh); }
337
- __device__ __forceinline__ __half2 operator/(const __half2 &lh, const __half2 &rh) { return __h2div(lh, rh); }
338
-
339
- __device__ __forceinline__ __half2& operator+=(__half2 &lh, const __half2 &rh) { lh = __hadd2(lh, rh); return lh; }
340
- __device__ __forceinline__ __half2& operator-=(__half2 &lh, const __half2 &rh) { lh = __hsub2(lh, rh); return lh; }
341
- __device__ __forceinline__ __half2& operator*=(__half2 &lh, const __half2 &rh) { lh = __hmul2(lh, rh); return lh; }
342
- __device__ __forceinline__ __half2& operator/=(__half2 &lh, const __half2 &rh) { lh = __h2div(lh, rh); return lh; }
343
-
344
- __device__ __forceinline__ __half2 &operator++(__half2 &h) { __half2_raw one; one.x = 0x3C00U; one.y = 0x3C00U; h = __hadd2(h, one); return h; }
345
- __device__ __forceinline__ __half2 &operator--(__half2 &h) { __half2_raw one; one.x = 0x3C00U; one.y = 0x3C00U; h = __hsub2(h, one); return h; }
346
- __device__ __forceinline__ __half2 operator++(__half2 &h, const int ignored)
347
- {
348
- // ignored on purpose. Parameter only needed to distinguish the function declaration from other types of operators.
349
- static_cast<void>(ignored);
350
-
351
- const __half2 ret = h;
352
- __half2_raw one;
353
- one.x = 0x3C00U;
354
- one.y = 0x3C00U;
355
- h = __hadd2(h, one);
356
- return ret;
357
- }
358
- __device__ __forceinline__ __half2 operator--(__half2 &h, const int ignored)
359
- {
360
- // ignored on purpose. Parameter only needed to distinguish the function declaration from other types of operators.
361
- static_cast<void>(ignored);
362
-
363
- const __half2 ret = h;
364
- __half2_raw one;
365
- one.x = 0x3C00U;
366
- one.y = 0x3C00U;
367
- h = __hsub2(h, one);
368
- return ret;
369
- }
370
-
371
- __device__ __forceinline__ __half2 operator+(const __half2 &h) { return h; }
372
- __device__ __forceinline__ __half2 operator-(const __half2 &h) { return __hneg2(h); }
373
-
374
- __device__ __forceinline__ bool operator==(const __half2 &lh, const __half2 &rh) { return __hbeq2(lh, rh); }
375
- __device__ __forceinline__ bool operator!=(const __half2 &lh, const __half2 &rh) { return __hbneu2(lh, rh); }
376
- __device__ __forceinline__ bool operator>(const __half2 &lh, const __half2 &rh) { return __hbgt2(lh, rh); }
377
- __device__ __forceinline__ bool operator<(const __half2 &lh, const __half2 &rh) { return __hblt2(lh, rh); }
378
- __device__ __forceinline__ bool operator>=(const __half2 &lh, const __half2 &rh) { return __hbge2(lh, rh); }
379
- __device__ __forceinline__ bool operator<=(const __half2 &lh, const __half2 &rh) { return __hble2(lh, rh); }
380
-
381
- #endif /* !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) */
382
- #endif /* defined(__CUDACC__) */
383
-
384
- /* Restore warning for multiple assignment operators */
385
- #if defined(_MSC_VER) && _MSC_VER >= 1500
386
- #pragma warning( pop )
387
- #endif /* defined(_MSC_VER) && _MSC_VER >= 1500 */
388
-
389
- /* Restore -Weffc++ warnings from here on */
390
- #if defined(__GNUC__)
391
- #if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
392
- #pragma GCC diagnostic pop
393
- #endif /* __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) */
394
- #endif /* defined(__GNUC__) */
395
-
396
- #undef __CUDA_HOSTDEVICE__
397
- #undef __CUDA_ALIGN__
398
-
399
- #ifndef __CUDACC_RTC__ /* no host functions in NVRTC mode */
400
- static inline unsigned short __internal_float2half(const float f, unsigned int &sign, unsigned int &remainder)
401
- {
402
- unsigned int x;
403
- unsigned int u;
404
- unsigned int result;
405
- #if defined(__CUDACC__)
406
- (void)memcpy(&x, &f, sizeof(f));
407
- #else
408
- (void)std::memcpy(&x, &f, sizeof(f));
409
- #endif
410
- u = (x & 0x7fffffffU);
411
- sign = ((x >> 16U) & 0x8000U);
412
- // NaN/+Inf/-Inf
413
- if (u >= 0x7f800000U) {
414
- remainder = 0U;
415
- result = ((u == 0x7f800000U) ? (sign | 0x7c00U) : 0x7fffU);
416
- } else if (u > 0x477fefffU) { // Overflows
417
- remainder = 0x80000000U;
418
- result = (sign | 0x7bffU);
419
- } else if (u >= 0x38800000U) { // Normal numbers
420
- remainder = u << 19U;
421
- u -= 0x38000000U;
422
- result = (sign | (u >> 13U));
423
- } else if (u < 0x33000001U) { // +0/-0
424
- remainder = u;
425
- result = sign;
426
- } else { // Denormal numbers
427
- const unsigned int exponent = u >> 23U;
428
- const unsigned int shift = 0x7eU - exponent;
429
- unsigned int mantissa = (u & 0x7fffffU);
430
- mantissa |= 0x800000U;
431
- remainder = mantissa << (32U - shift);
432
- result = (sign | (mantissa >> shift));
433
- result &= 0x0000FFFFU;
434
- }
435
- return static_cast<unsigned short>(result);
436
- }
437
- #endif /* #if !defined(__CUDACC_RTC__) */
438
-
439
- __CUDA_HOSTDEVICE_FP16_DECL__ __half __double2half(const double a)
440
- {
441
- #if defined(__CUDA_ARCH__)
442
- __half val;
443
- asm("{ cvt.rn.f16.f64 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "d"(a));
444
- return val;
445
- #else
446
- __half result;
447
- /*
448
- // Perform rounding to 11 bits of precision, convert value
449
- // to float and call existing float to half conversion.
450
- // By pre-rounding to 11 bits we avoid additional rounding
451
- // in float to half conversion.
452
- */
453
- unsigned long long int absa;
454
- unsigned long long int ua;
455
- #if defined(__CUDACC__)
456
- (void)memcpy(&ua, &a, sizeof(a));
457
- #else
458
- (void)std::memcpy(&ua, &a, sizeof(a));
459
- #endif
460
- absa = (ua & 0x7fffffffffffffffULL);
461
- if ((absa >= 0x40f0000000000000ULL) || (absa <= 0x3e60000000000000ULL))
462
- {
463
- /*
464
- // |a| >= 2^16 or NaN or |a| <= 2^(-25)
465
- // double-rounding is not a problem
466
- */
467
- result = __float2half(static_cast<float>(a));
468
- }
469
- else
470
- {
471
- /*
472
- // here 2^(-25) < |a| < 2^16
473
- // prepare shifter value such that a + shifter
474
- // done in double precision performs round-to-nearest-even
475
- // and (a + shifter) - shifter results in a rounded to
476
- // 11 bits of precision. Shifter needs to have exponent of
477
- // a plus 53 - 11 = 42 and a leading bit in mantissa to guard
478
- // against negative values.
479
- // So need to have |a| capped to avoid overflow in exponent.
480
- // For inputs that are smaller than half precision minnorm
481
- // we prepare fixed shifter exponent.
482
- */
483
- unsigned long long shifterBits;
484
- if (absa >= 0x3f10000000000000ULL)
485
- {
486
- /*
487
- // Here if |a| >= 2^(-14)
488
- // add 42 to exponent bits
489
- */
490
- shifterBits = (ua & 0x7ff0000000000000ULL) + 0x02A0000000000000ULL;
491
- }
492
- else
493
- {
494
- /*
495
- // 2^(-25) < |a| < 2^(-14), potentially results in denormal
496
- // set exponent bits to 42 - 14 + bias
497
- */
498
- shifterBits = 0x41B0000000000000ULL;
499
- }
500
- // set leading mantissa bit to protect against negative inputs
501
- shifterBits |= 0x0008000000000000ULL;
502
- double shifter;
503
- #if defined(__CUDACC__)
504
- (void)memcpy(&shifter, &shifterBits, sizeof(shifterBits));
505
- #else
506
- (void)std::memcpy(&shifter, &shifterBits, sizeof(shifterBits));
507
- #endif
508
- double aShiftRound = a + shifter;
509
-
510
- /*
511
- // Prevent the compiler from optimizing away a + shifter - shifter
512
- // by doing intermediate memcopy and harmless bitwize operation
513
- */
514
- unsigned long long int aShiftRoundBits;
515
- #if defined(__CUDACC__)
516
- (void)memcpy(&aShiftRoundBits, &aShiftRound, sizeof(aShiftRound));
517
- #else
518
- (void)std::memcpy(&aShiftRoundBits, &aShiftRound, sizeof(aShiftRound));
519
- #endif
520
-
521
- // the value is positive, so this operation doesn't change anything
522
- aShiftRoundBits &= 0x7fffffffffffffffULL;
523
-
524
- #if defined(__CUDACC__)
525
- (void)memcpy(&aShiftRound, &aShiftRoundBits, sizeof(aShiftRound));
526
- #else
527
- (void)std::memcpy(&aShiftRound, &aShiftRoundBits, sizeof(aShiftRound));
528
- #endif
529
-
530
- result = __float2half(static_cast<float>(aShiftRound - shifter));
531
- }
532
-
533
- return result;
534
- #endif
535
- }
536
-
537
- __CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half(const float a)
538
- {
539
- __half val;
540
- #if defined(__CUDA_ARCH__)
541
- asm("{ cvt.rn.f16.f32 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "f"(a));
542
- #else
543
- __half_raw r;
544
- unsigned int sign = 0U;
545
- unsigned int remainder = 0U;
546
- r.x = __internal_float2half(a, sign, remainder);
547
- if ((remainder > 0x80000000U) || ((remainder == 0x80000000U) && ((r.x & 0x1U) != 0U))) {
548
- r.x++;
549
- }
550
- val = r;
551
- #endif
552
- return val;
553
- }
554
- __CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_rn(const float a)
555
- {
556
- __half val;
557
- #if defined(__CUDA_ARCH__)
558
- asm("{ cvt.rn.f16.f32 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "f"(a));
559
- #else
560
- __half_raw r;
561
- unsigned int sign = 0U;
562
- unsigned int remainder = 0U;
563
- r.x = __internal_float2half(a, sign, remainder);
564
- if ((remainder > 0x80000000U) || ((remainder == 0x80000000U) && ((r.x & 0x1U) != 0U))) {
565
- r.x++;
566
- }
567
- val = r;
568
- #endif
569
- return val;
570
- }
571
- __CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_rz(const float a)
572
- {
573
- __half val;
574
- #if defined(__CUDA_ARCH__)
575
- asm("{ cvt.rz.f16.f32 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "f"(a));
576
- #else
577
- __half_raw r;
578
- unsigned int sign = 0U;
579
- unsigned int remainder = 0U;
580
- r.x = __internal_float2half(a, sign, remainder);
581
- val = r;
582
- #endif
583
- return val;
584
- }
585
- __CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_rd(const float a)
586
- {
587
- __half val;
588
- #if defined(__CUDA_ARCH__)
589
- asm("{ cvt.rm.f16.f32 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "f"(a));
590
- #else
591
- __half_raw r;
592
- unsigned int sign = 0U;
593
- unsigned int remainder = 0U;
594
- r.x = __internal_float2half(a, sign, remainder);
595
- if ((remainder != 0U) && (sign != 0U)) {
596
- r.x++;
597
- }
598
- val = r;
599
- #endif
600
- return val;
601
- }
602
- __CUDA_HOSTDEVICE_FP16_DECL__ __half __float2half_ru(const float a)
603
- {
604
- __half val;
605
- #if defined(__CUDA_ARCH__)
606
- asm("{ cvt.rp.f16.f32 %0, %1;}\n" : "=h"(__HALF_TO_US(val)) : "f"(a));
607
- #else
608
- __half_raw r;
609
- unsigned int sign = 0U;
610
- unsigned int remainder = 0U;
611
- r.x = __internal_float2half(a, sign, remainder);
612
- if ((remainder != 0U) && (sign == 0U)) {
613
- r.x++;
614
- }
615
- val = r;
616
- #endif
617
- return val;
618
- }
619
- __CUDA_HOSTDEVICE_FP16_DECL__ __half2 __float2half2_rn(const float a)
620
- {
621
- __half2 val;
622
- #if defined(__CUDA_ARCH__)
623
- asm("{.reg .f16 low;\n"
624
- " cvt.rn.f16.f32 low, %1;\n"
625
- " mov.b32 %0, {low,low};}\n" : "=r"(__HALF2_TO_UI(val)) : "f"(a));
626
- #else
627
- val = __half2(__float2half_rn(a), __float2half_rn(a));
628
- #endif
629
- return val;
630
- }
631
- __CUDA_HOSTDEVICE_FP16_DECL__ __half2 __floats2half2_rn(const float a, const float b)
632
- {
633
- __half2 val;
634
- #if defined(__CUDA_ARCH__)
635
- #if (__CUDA_ARCH__ >= 800)
636
- asm("{ cvt.rn.f16x2.f32 %0, %2, %1; }\n"
637
- : "=r"(__HALF2_TO_UI(val)) : "f"(a), "f"(b));
638
- #else
639
- asm("{.reg .f16 low,high;\n"
640
- " cvt.rn.f16.f32 low, %1;\n"
641
- " cvt.rn.f16.f32 high, %2;\n"
642
- " mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "f"(a), "f"(b));
643
- #endif
644
- #else
645
- val = __half2(__float2half_rn(a), __float2half_rn(b));
646
- #endif
647
- return val;
648
- }
649
-
650
- #ifndef __CUDACC_RTC__ /* no host functions in NVRTC mode */
651
- static inline float __internal_half2float(const unsigned short h)
652
- {
653
- unsigned int sign = ((static_cast<unsigned int>(h) >> 15U) & 1U);
654
- unsigned int exponent = ((static_cast<unsigned int>(h) >> 10U) & 0x1fU);
655
- unsigned int mantissa = ((static_cast<unsigned int>(h) & 0x3ffU) << 13U);
656
- float f;
657
- if (exponent == 0x1fU) { /* NaN or Inf */
658
- /* discard sign of a NaN */
659
- sign = ((mantissa != 0U) ? (sign >> 1U) : sign);
660
- mantissa = ((mantissa != 0U) ? 0x7fffffU : 0U);
661
- exponent = 0xffU;
662
- } else if (exponent == 0U) { /* Denorm or Zero */
663
- if (mantissa != 0U) {
664
- unsigned int msb;
665
- exponent = 0x71U;
666
- do {
667
- msb = (mantissa & 0x400000U);
668
- mantissa <<= 1U; /* normalize */
669
- --exponent;
670
- } while (msb == 0U);
671
- mantissa &= 0x7fffffU; /* 1.mantissa is implicit */
672
- }
673
- } else {
674
- exponent += 0x70U;
675
- }
676
- const unsigned int u = ((sign << 31U) | (exponent << 23U) | mantissa);
677
- #if defined(__CUDACC__)
678
- (void)memcpy(&f, &u, sizeof(u));
679
- #else
680
- (void)std::memcpy(&f, &u, sizeof(u));
681
- #endif
682
- return f;
683
- }
684
- #endif /* !defined(__CUDACC_RTC__) */
685
-
686
- __CUDA_HOSTDEVICE_FP16_DECL__ float __half2float(const __half a)
687
- {
688
- float val;
689
- #if defined(__CUDA_ARCH__)
690
- asm("{ cvt.f32.f16 %0, %1;}\n" : "=f"(val) : "h"(__HALF_TO_CUS(a)));
691
- #else
692
- val = __internal_half2float(static_cast<__half_raw>(a).x);
693
- #endif
694
- return val;
695
- }
696
- __CUDA_HOSTDEVICE_FP16_DECL__ float __low2float(const __half2 a)
697
- {
698
- float val;
699
- #if defined(__CUDA_ARCH__)
700
- asm("{.reg .f16 low,high;\n"
701
- " mov.b32 {low,high},%1;\n"
702
- " cvt.f32.f16 %0, low;}\n" : "=f"(val) : "r"(__HALF2_TO_CUI(a)));
703
- #else
704
- val = __internal_half2float(static_cast<__half2_raw>(a).x);
705
- #endif
706
- return val;
707
- }
708
- __CUDA_HOSTDEVICE_FP16_DECL__ float __high2float(const __half2 a)
709
- {
710
- float val;
711
- #if defined(__CUDA_ARCH__)
712
- asm("{.reg .f16 low,high;\n"
713
- " mov.b32 {low,high},%1;\n"
714
- " cvt.f32.f16 %0, high;}\n" : "=f"(val) : "r"(__HALF2_TO_CUI(a)));
715
- #else
716
- val = __internal_half2float(static_cast<__half2_raw>(a).y);
717
- #endif
718
- return val;
719
- }
720
- __CUDA_HOSTDEVICE_FP16_DECL__ short int __half2short_rz(const __half h)
721
- {
722
- short int i;
723
- #if defined __CUDA_ARCH__
724
- asm("cvt.rzi.s16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h)));
725
- #else
726
- const float f = __half2float(h);
727
- const short int max_val = (short int)0x7fffU;
728
- const short int min_val = (short int)0x8000U;
729
- const unsigned short bits = static_cast<unsigned short>(static_cast<__half_raw>(h).x << 1U);
730
- // saturation fixup
731
- if (bits > (unsigned short)0xF800U) {
732
- // NaN
733
- i = 0;
734
- } else if (f > static_cast<float>(max_val)) {
735
- // saturate maximum
736
- i = max_val;
737
- } else if (f < static_cast<float>(min_val)) {
738
- // saturate minimum
739
- i = min_val;
740
- } else {
741
- // normal value, conversion is well-defined
742
- i = static_cast<short int>(f);
743
- }
744
- #endif
745
- return i;
746
- }
747
- __CUDA_HOSTDEVICE_FP16_DECL__ unsigned short int __half2ushort_rz(const __half h)
748
- {
749
- unsigned short int i;
750
- #if defined __CUDA_ARCH__
751
- asm("cvt.rzi.u16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h)));
752
- #else
753
- const float f = __half2float(h);
754
- const unsigned short int max_val = 0xffffU;
755
- const unsigned short int min_val = 0U;
756
- const unsigned short bits = static_cast<unsigned short>(static_cast<__half_raw>(h).x << 1U);
757
- // saturation fixup
758
- if (bits > (unsigned short)0xF800U) {
759
- // NaN
760
- i = 0U;
761
- } else if (f > static_cast<float>(max_val)) {
762
- // saturate maximum
763
- i = max_val;
764
- } else if (f < static_cast<float>(min_val)) {
765
- // saturate minimum
766
- i = min_val;
767
- } else {
768
- // normal value, conversion is well-defined
769
- i = static_cast<unsigned short int>(f);
770
- }
771
- #endif
772
- return i;
773
- }
774
- __CUDA_HOSTDEVICE_FP16_DECL__ int __half2int_rz(const __half h)
775
- {
776
- int i;
777
- #if defined __CUDA_ARCH__
778
- asm("cvt.rzi.s32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h)));
779
- #else
780
- const float f = __half2float(h);
781
- const int max_val = (int)0x7fffffffU;
782
- const int min_val = (int)0x80000000U;
783
- const unsigned short bits = static_cast<unsigned short>(static_cast<__half_raw>(h).x << 1U);
784
- // saturation fixup
785
- if (bits > (unsigned short)0xF800U) {
786
- // NaN
787
- i = 0;
788
- } else if (f > static_cast<float>(max_val)) {
789
- // saturate maximum
790
- i = max_val;
791
- } else if (f < static_cast<float>(min_val)) {
792
- // saturate minimum
793
- i = min_val;
794
- } else {
795
- // normal value, conversion is well-defined
796
- i = static_cast<int>(f);
797
- }
798
- #endif
799
- return i;
800
- }
801
- __CUDA_HOSTDEVICE_FP16_DECL__ unsigned int __half2uint_rz(const __half h)
802
- {
803
- unsigned int i;
804
- #if defined __CUDA_ARCH__
805
- asm("cvt.rzi.u32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h)));
806
- #else
807
- const float f = __half2float(h);
808
- const unsigned int max_val = 0xffffffffU;
809
- const unsigned int min_val = 0U;
810
- const unsigned short bits = static_cast<unsigned short>(static_cast<__half_raw>(h).x << 1U);
811
- // saturation fixup
812
- if (bits > (unsigned short)0xF800U) {
813
- // NaN
814
- i = 0U;
815
- } else if (f > static_cast<float>(max_val)) {
816
- // saturate maximum
817
- i = max_val;
818
- } else if (f < static_cast<float>(min_val)) {
819
- // saturate minimum
820
- i = min_val;
821
- } else {
822
- // normal value, conversion is well-defined
823
- i = static_cast<unsigned int>(f);
824
- }
825
- #endif
826
- return i;
827
- }
828
- __CUDA_HOSTDEVICE_FP16_DECL__ long long int __half2ll_rz(const __half h)
829
- {
830
- long long int i;
831
- #if defined __CUDA_ARCH__
832
- asm("cvt.rzi.s64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h)));
833
- #else
834
- const float f = __half2float(h);
835
- const long long int max_val = (long long int)0x7fffffffffffffffULL;
836
- const long long int min_val = (long long int)0x8000000000000000ULL;
837
- const unsigned short bits = static_cast<unsigned short>(static_cast<__half_raw>(h).x << 1U);
838
- // saturation fixup
839
- if (bits > (unsigned short)0xF800U) {
840
- // NaN
841
- i = min_val;
842
- } else if (f > static_cast<float>(max_val)) {
843
- // saturate maximum
844
- i = max_val;
845
- } else if (f < static_cast<float>(min_val)) {
846
- // saturate minimum
847
- i = min_val;
848
- } else {
849
- // normal value, conversion is well-defined
850
- i = static_cast<long long int>(f);
851
- }
852
- #endif
853
- return i;
854
- }
855
- __CUDA_HOSTDEVICE_FP16_DECL__ unsigned long long int __half2ull_rz(const __half h)
856
- {
857
- unsigned long long int i;
858
- #if defined __CUDA_ARCH__
859
- asm("cvt.rzi.u64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h)));
860
- #else
861
- const float f = __half2float(h);
862
- const unsigned long long int max_val = 0xffffffffffffffffULL;
863
- const unsigned long long int min_val = 0ULL;
864
- const unsigned short bits = static_cast<unsigned short>(static_cast<__half_raw>(h).x << 1U);
865
- // saturation fixup
866
- if (bits > (unsigned short)0xF800U) {
867
- // NaN
868
- i = 0x8000000000000000ULL;
869
- } else if (f > static_cast<float>(max_val)) {
870
- // saturate maximum
871
- i = max_val;
872
- } else if (f < static_cast<float>(min_val)) {
873
- // saturate minimum
874
- i = min_val;
875
- } else {
876
- // normal value, conversion is well-defined
877
- i = static_cast<unsigned long long int>(f);
878
- }
879
- #endif
880
- return i;
881
- }
882
-
883
- /* Intrinsic functions only available to nvcc compilers */
884
- #if defined(__CUDACC__)
885
-
886
- /* CUDA vector-types compatible vector creation function (note returns __half2, not half2) */
887
- __VECTOR_FUNCTIONS_DECL__ __half2 make_half2(const __half x, const __half y)
888
- {
889
- __half2 t; t.x = x; t.y = y; return t;
890
- }
891
- #undef __VECTOR_FUNCTIONS_DECL__
892
-
893
-
894
- /* Definitions of intrinsics */
895
- __CUDA_HOSTDEVICE_FP16_DECL__ __half2 __float22half2_rn(const float2 a)
896
- {
897
- const __half2 val = __floats2half2_rn(a.x, a.y);
898
- return val;
899
- }
900
- __CUDA_HOSTDEVICE_FP16_DECL__ float2 __half22float2(const __half2 a)
901
- {
902
- float hi_float;
903
- float lo_float;
904
- #if defined(__CUDA_ARCH__)
905
- asm("{.reg .f16 low,high;\n"
906
- " mov.b32 {low,high},%1;\n"
907
- " cvt.f32.f16 %0, low;}\n" : "=f"(lo_float) : "r"(__HALF2_TO_CUI(a)));
908
-
909
- asm("{.reg .f16 low,high;\n"
910
- " mov.b32 {low,high},%1;\n"
911
- " cvt.f32.f16 %0, high;}\n" : "=f"(hi_float) : "r"(__HALF2_TO_CUI(a)));
912
- #else
913
- lo_float = __internal_half2float(((__half2_raw)a).x);
914
- hi_float = __internal_half2float(((__half2_raw)a).y);
915
- #endif
916
- return make_float2(lo_float, hi_float);
917
- }
918
- __CUDA_FP16_DECL__ int __half2int_rn(const __half h)
919
- {
920
- int i;
921
- asm("cvt.rni.s32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h)));
922
- return i;
923
- }
924
- __CUDA_FP16_DECL__ int __half2int_rd(const __half h)
925
- {
926
- int i;
927
- asm("cvt.rmi.s32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h)));
928
- return i;
929
- }
930
- __CUDA_FP16_DECL__ int __half2int_ru(const __half h)
931
- {
932
- int i;
933
- asm("cvt.rpi.s32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h)));
934
- return i;
935
- }
936
- __CUDA_HOSTDEVICE_FP16_DECL__ __half __int2half_rn(const int i)
937
- {
938
- __half h;
939
- #if defined(__CUDA_ARCH__)
940
- asm("cvt.rn.f16.s32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i));
941
- #else
942
- // double-rounding is not a problem here: if integer
943
- // has more than 24 bits, it is already too large to
944
- // be represented in half precision, and result will
945
- // be infinity.
946
- const float f = static_cast<float>(i);
947
- h = __float2half_rn(f);
948
- #endif
949
- return h;
950
- }
951
- __CUDA_FP16_DECL__ __half __int2half_rz(const int i)
952
- {
953
- __half h;
954
- asm("cvt.rz.f16.s32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i));
955
- return h;
956
- }
957
- __CUDA_FP16_DECL__ __half __int2half_rd(const int i)
958
- {
959
- __half h;
960
- asm("cvt.rm.f16.s32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i));
961
- return h;
962
- }
963
- __CUDA_FP16_DECL__ __half __int2half_ru(const int i)
964
- {
965
- __half h;
966
- asm("cvt.rp.f16.s32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i));
967
- return h;
968
- }
969
-
970
- __CUDA_FP16_DECL__ short int __half2short_rn(const __half h)
971
- {
972
- short int i;
973
- asm("cvt.rni.s16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h)));
974
- return i;
975
- }
976
- __CUDA_FP16_DECL__ short int __half2short_rd(const __half h)
977
- {
978
- short int i;
979
- asm("cvt.rmi.s16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h)));
980
- return i;
981
- }
982
- __CUDA_FP16_DECL__ short int __half2short_ru(const __half h)
983
- {
984
- short int i;
985
- asm("cvt.rpi.s16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h)));
986
- return i;
987
- }
988
- __CUDA_HOSTDEVICE_FP16_DECL__ __half __short2half_rn(const short int i)
989
- {
990
- __half h;
991
- #if defined __CUDA_ARCH__
992
- asm("cvt.rn.f16.s16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i));
993
- #else
994
- const float f = static_cast<float>(i);
995
- h = __float2half_rn(f);
996
- #endif
997
- return h;
998
- }
999
- __CUDA_FP16_DECL__ __half __short2half_rz(const short int i)
1000
- {
1001
- __half h;
1002
- asm("cvt.rz.f16.s16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i));
1003
- return h;
1004
- }
1005
- __CUDA_FP16_DECL__ __half __short2half_rd(const short int i)
1006
- {
1007
- __half h;
1008
- asm("cvt.rm.f16.s16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i));
1009
- return h;
1010
- }
1011
- __CUDA_FP16_DECL__ __half __short2half_ru(const short int i)
1012
- {
1013
- __half h;
1014
- asm("cvt.rp.f16.s16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i));
1015
- return h;
1016
- }
1017
-
1018
- __CUDA_FP16_DECL__ unsigned int __half2uint_rn(const __half h)
1019
- {
1020
- unsigned int i;
1021
- asm("cvt.rni.u32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h)));
1022
- return i;
1023
- }
1024
- __CUDA_FP16_DECL__ unsigned int __half2uint_rd(const __half h)
1025
- {
1026
- unsigned int i;
1027
- asm("cvt.rmi.u32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h)));
1028
- return i;
1029
- }
1030
- __CUDA_FP16_DECL__ unsigned int __half2uint_ru(const __half h)
1031
- {
1032
- unsigned int i;
1033
- asm("cvt.rpi.u32.f16 %0, %1;" : "=r"(i) : "h"(__HALF_TO_CUS(h)));
1034
- return i;
1035
- }
1036
- __CUDA_HOSTDEVICE_FP16_DECL__ __half __uint2half_rn(const unsigned int i)
1037
- {
1038
- __half h;
1039
- #if defined __CUDA_ARCH__
1040
- asm("cvt.rn.f16.u32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i));
1041
- #else
1042
- // double-rounding is not a problem here: if integer
1043
- // has more than 24 bits, it is already too large to
1044
- // be represented in half precision, and result will
1045
- // be infinity.
1046
- const float f = static_cast<float>(i);
1047
- h = __float2half_rn(f);
1048
- #endif
1049
- return h;
1050
- }
1051
- __CUDA_FP16_DECL__ __half __uint2half_rz(const unsigned int i)
1052
- {
1053
- __half h;
1054
- asm("cvt.rz.f16.u32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i));
1055
- return h;
1056
- }
1057
- __CUDA_FP16_DECL__ __half __uint2half_rd(const unsigned int i)
1058
- {
1059
- __half h;
1060
- asm("cvt.rm.f16.u32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i));
1061
- return h;
1062
- }
1063
- __CUDA_FP16_DECL__ __half __uint2half_ru(const unsigned int i)
1064
- {
1065
- __half h;
1066
- asm("cvt.rp.f16.u32 %0, %1;" : "=h"(__HALF_TO_US(h)) : "r"(i));
1067
- return h;
1068
- }
1069
-
1070
- __CUDA_FP16_DECL__ unsigned short int __half2ushort_rn(const __half h)
1071
- {
1072
- unsigned short int i;
1073
- asm("cvt.rni.u16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h)));
1074
- return i;
1075
- }
1076
- __CUDA_FP16_DECL__ unsigned short int __half2ushort_rd(const __half h)
1077
- {
1078
- unsigned short int i;
1079
- asm("cvt.rmi.u16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h)));
1080
- return i;
1081
- }
1082
- __CUDA_FP16_DECL__ unsigned short int __half2ushort_ru(const __half h)
1083
- {
1084
- unsigned short int i;
1085
- asm("cvt.rpi.u16.f16 %0, %1;" : "=h"(i) : "h"(__HALF_TO_CUS(h)));
1086
- return i;
1087
- }
1088
- __CUDA_HOSTDEVICE_FP16_DECL__ __half __ushort2half_rn(const unsigned short int i)
1089
- {
1090
- __half h;
1091
- #if defined __CUDA_ARCH__
1092
- asm("cvt.rn.f16.u16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i));
1093
- #else
1094
- const float f = static_cast<float>(i);
1095
- h = __float2half_rn(f);
1096
- #endif
1097
- return h;
1098
- }
1099
- __CUDA_FP16_DECL__ __half __ushort2half_rz(const unsigned short int i)
1100
- {
1101
- __half h;
1102
- asm("cvt.rz.f16.u16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i));
1103
- return h;
1104
- }
1105
- __CUDA_FP16_DECL__ __half __ushort2half_rd(const unsigned short int i)
1106
- {
1107
- __half h;
1108
- asm("cvt.rm.f16.u16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i));
1109
- return h;
1110
- }
1111
- __CUDA_FP16_DECL__ __half __ushort2half_ru(const unsigned short int i)
1112
- {
1113
- __half h;
1114
- asm("cvt.rp.f16.u16 %0, %1;" : "=h"(__HALF_TO_US(h)) : "h"(i));
1115
- return h;
1116
- }
1117
-
1118
- __CUDA_FP16_DECL__ unsigned long long int __half2ull_rn(const __half h)
1119
- {
1120
- unsigned long long int i;
1121
- asm("cvt.rni.u64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h)));
1122
- return i;
1123
- }
1124
- __CUDA_FP16_DECL__ unsigned long long int __half2ull_rd(const __half h)
1125
- {
1126
- unsigned long long int i;
1127
- asm("cvt.rmi.u64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h)));
1128
- return i;
1129
- }
1130
- __CUDA_FP16_DECL__ unsigned long long int __half2ull_ru(const __half h)
1131
- {
1132
- unsigned long long int i;
1133
- asm("cvt.rpi.u64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h)));
1134
- return i;
1135
- }
1136
- __CUDA_HOSTDEVICE_FP16_DECL__ __half __ull2half_rn(const unsigned long long int i)
1137
- {
1138
- __half h;
1139
- #if defined(__CUDA_ARCH__)
1140
- asm("cvt.rn.f16.u64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i));
1141
- #else
1142
- // double-rounding is not a problem here: if integer
1143
- // has more than 24 bits, it is already too large to
1144
- // be represented in half precision, and result will
1145
- // be infinity.
1146
- const float f = static_cast<float>(i);
1147
- h = __float2half_rn(f);
1148
- #endif
1149
- return h;
1150
- }
1151
- __CUDA_FP16_DECL__ __half __ull2half_rz(const unsigned long long int i)
1152
- {
1153
- __half h;
1154
- asm("cvt.rz.f16.u64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i));
1155
- return h;
1156
- }
1157
- __CUDA_FP16_DECL__ __half __ull2half_rd(const unsigned long long int i)
1158
- {
1159
- __half h;
1160
- asm("cvt.rm.f16.u64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i));
1161
- return h;
1162
- }
1163
- __CUDA_FP16_DECL__ __half __ull2half_ru(const unsigned long long int i)
1164
- {
1165
- __half h;
1166
- asm("cvt.rp.f16.u64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i));
1167
- return h;
1168
- }
1169
-
1170
- __CUDA_FP16_DECL__ long long int __half2ll_rn(const __half h)
1171
- {
1172
- long long int i;
1173
- asm("cvt.rni.s64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h)));
1174
- return i;
1175
- }
1176
- __CUDA_FP16_DECL__ long long int __half2ll_rd(const __half h)
1177
- {
1178
- long long int i;
1179
- asm("cvt.rmi.s64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h)));
1180
- return i;
1181
- }
1182
- __CUDA_FP16_DECL__ long long int __half2ll_ru(const __half h)
1183
- {
1184
- long long int i;
1185
- asm("cvt.rpi.s64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h)));
1186
- return i;
1187
- }
1188
- __CUDA_HOSTDEVICE_FP16_DECL__ __half __ll2half_rn(const long long int i)
1189
- {
1190
- __half h;
1191
- #if defined(__CUDA_ARCH__)
1192
- asm("cvt.rn.f16.s64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i));
1193
- #else
1194
- // double-rounding is not a problem here: if integer
1195
- // has more than 24 bits, it is already too large to
1196
- // be represented in half precision, and result will
1197
- // be infinity.
1198
- const float f = static_cast<float>(i);
1199
- h = __float2half_rn(f);
1200
- #endif
1201
- return h;
1202
- }
1203
- __CUDA_FP16_DECL__ __half __ll2half_rz(const long long int i)
1204
- {
1205
- __half h;
1206
- asm("cvt.rz.f16.s64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i));
1207
- return h;
1208
- }
1209
- __CUDA_FP16_DECL__ __half __ll2half_rd(const long long int i)
1210
- {
1211
- __half h;
1212
- asm("cvt.rm.f16.s64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i));
1213
- return h;
1214
- }
1215
- __CUDA_FP16_DECL__ __half __ll2half_ru(const long long int i)
1216
- {
1217
- __half h;
1218
- asm("cvt.rp.f16.s64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i));
1219
- return h;
1220
- }
1221
-
1222
- __CUDA_FP16_DECL__ __half htrunc(const __half h)
1223
- {
1224
- __half r;
1225
- asm("cvt.rzi.f16.f16 %0, %1;" : "=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(h)));
1226
- return r;
1227
- }
1228
- __CUDA_FP16_DECL__ __half hceil(const __half h)
1229
- {
1230
- __half r;
1231
- asm("cvt.rpi.f16.f16 %0, %1;" : "=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(h)));
1232
- return r;
1233
- }
1234
- __CUDA_FP16_DECL__ __half hfloor(const __half h)
1235
- {
1236
- __half r;
1237
- asm("cvt.rmi.f16.f16 %0, %1;" : "=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(h)));
1238
- return r;
1239
- }
1240
- __CUDA_FP16_DECL__ __half hrint(const __half h)
1241
- {
1242
- __half r;
1243
- asm("cvt.rni.f16.f16 %0, %1;" : "=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(h)));
1244
- return r;
1245
- }
1246
-
1247
- __CUDA_FP16_DECL__ __half2 h2trunc(const __half2 h)
1248
- {
1249
- __half2 val;
1250
- asm("{.reg .f16 low,high;\n"
1251
- " mov.b32 {low,high}, %1;\n"
1252
- " cvt.rzi.f16.f16 low, low;\n"
1253
- " cvt.rzi.f16.f16 high, high;\n"
1254
- " mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(h)));
1255
- return val;
1256
- }
1257
- __CUDA_FP16_DECL__ __half2 h2ceil(const __half2 h)
1258
- {
1259
- __half2 val;
1260
- asm("{.reg .f16 low,high;\n"
1261
- " mov.b32 {low,high}, %1;\n"
1262
- " cvt.rpi.f16.f16 low, low;\n"
1263
- " cvt.rpi.f16.f16 high, high;\n"
1264
- " mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(h)));
1265
- return val;
1266
- }
1267
- __CUDA_FP16_DECL__ __half2 h2floor(const __half2 h)
1268
- {
1269
- __half2 val;
1270
- asm("{.reg .f16 low,high;\n"
1271
- " mov.b32 {low,high}, %1;\n"
1272
- " cvt.rmi.f16.f16 low, low;\n"
1273
- " cvt.rmi.f16.f16 high, high;\n"
1274
- " mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(h)));
1275
- return val;
1276
- }
1277
- __CUDA_FP16_DECL__ __half2 h2rint(const __half2 h)
1278
- {
1279
- __half2 val;
1280
- asm("{.reg .f16 low,high;\n"
1281
- " mov.b32 {low,high}, %1;\n"
1282
- " cvt.rni.f16.f16 low, low;\n"
1283
- " cvt.rni.f16.f16 high, high;\n"
1284
- " mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(h)));
1285
- return val;
1286
- }
1287
- __CUDA_FP16_DECL__ __half2 __lows2half2(const __half2 a, const __half2 b)
1288
- {
1289
- __half2 val;
1290
- asm("{.reg .f16 alow,ahigh,blow,bhigh;\n"
1291
- " mov.b32 {alow,ahigh}, %1;\n"
1292
- " mov.b32 {blow,bhigh}, %2;\n"
1293
- " mov.b32 %0, {alow,blow};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)), "r"(__HALF2_TO_CUI(b)));
1294
- return val;
1295
- }
1296
- __CUDA_FP16_DECL__ __half2 __highs2half2(const __half2 a, const __half2 b)
1297
- {
1298
- __half2 val;
1299
- asm("{.reg .f16 alow,ahigh,blow,bhigh;\n"
1300
- " mov.b32 {alow,ahigh}, %1;\n"
1301
- " mov.b32 {blow,bhigh}, %2;\n"
1302
- " mov.b32 %0, {ahigh,bhigh};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)), "r"(__HALF2_TO_CUI(b)));
1303
- return val;
1304
- }
1305
- __CUDA_FP16_DECL__ __half __low2half(const __half2 a)
1306
- {
1307
- __half ret;
1308
- asm("{.reg .f16 low,high;\n"
1309
- " mov.b32 {low,high}, %1;\n"
1310
- " mov.b16 %0, low;}" : "=h"(__HALF_TO_US(ret)) : "r"(__HALF2_TO_CUI(a)));
1311
- return ret;
1312
- }
1313
- __CUDA_FP16_DECL__ int __hisinf(const __half a)
1314
- {
1315
- int retval;
1316
- if (__HALF_TO_CUS(a) == 0xFC00U) {
1317
- retval = -1;
1318
- } else if (__HALF_TO_CUS(a) == 0x7C00U) {
1319
- retval = 1;
1320
- } else {
1321
- retval = 0;
1322
- }
1323
- return retval;
1324
- }
1325
- __CUDA_FP16_DECL__ __half2 __low2half2(const __half2 a)
1326
- {
1327
- __half2 val;
1328
- asm("{.reg .f16 low,high;\n"
1329
- " mov.b32 {low,high}, %1;\n"
1330
- " mov.b32 %0, {low,low};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)));
1331
- return val;
1332
- }
1333
- __CUDA_FP16_DECL__ __half2 __high2half2(const __half2 a)
1334
- {
1335
- __half2 val;
1336
- asm("{.reg .f16 low,high;\n"
1337
- " mov.b32 {low,high}, %1;\n"
1338
- " mov.b32 %0, {high,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)));
1339
- return val;
1340
- }
1341
- __CUDA_FP16_DECL__ __half __high2half(const __half2 a)
1342
- {
1343
- __half ret;
1344
- asm("{.reg .f16 low,high;\n"
1345
- " mov.b32 {low,high}, %1;\n"
1346
- " mov.b16 %0, high;}" : "=h"(__HALF_TO_US(ret)) : "r"(__HALF2_TO_CUI(a)));
1347
- return ret;
1348
- }
1349
- __CUDA_FP16_DECL__ __half2 __halves2half2(const __half a, const __half b)
1350
- {
1351
- __half2 val;
1352
- asm("{ mov.b32 %0, {%1,%2};}\n"
1353
- : "=r"(__HALF2_TO_UI(val)) : "h"(__HALF_TO_CUS(a)), "h"(__HALF_TO_CUS(b)));
1354
- return val;
1355
- }
1356
- __CUDA_FP16_DECL__ __half2 __half2half2(const __half a)
1357
- {
1358
- __half2 val;
1359
- asm("{ mov.b32 %0, {%1,%1};}\n"
1360
- : "=r"(__HALF2_TO_UI(val)) : "h"(__HALF_TO_CUS(a)));
1361
- return val;
1362
- }
1363
- __CUDA_FP16_DECL__ __half2 __lowhigh2highlow(const __half2 a)
1364
- {
1365
- __half2 val;
1366
- asm("{.reg .f16 low,high;\n"
1367
- " mov.b32 {low,high}, %1;\n"
1368
- " mov.b32 %0, {high,low};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)));
1369
- return val;
1370
- }
1371
- __CUDA_FP16_DECL__ short int __half_as_short(const __half h)
1372
- {
1373
- return static_cast<short int>(__HALF_TO_CUS(h));
1374
- }
1375
- __CUDA_FP16_DECL__ unsigned short int __half_as_ushort(const __half h)
1376
- {
1377
- return __HALF_TO_CUS(h);
1378
- }
1379
- __CUDA_FP16_DECL__ __half __short_as_half(const short int i)
1380
- {
1381
- __half h;
1382
- __HALF_TO_US(h) = static_cast<unsigned short int>(i);
1383
- return h;
1384
- }
1385
- __CUDA_FP16_DECL__ __half __ushort_as_half(const unsigned short int i)
1386
- {
1387
- __half h;
1388
- __HALF_TO_US(h) = i;
1389
- return h;
1390
- }
1391
-
1392
- /******************************************************************************
1393
- * __half arithmetic *
1394
- ******************************************************************************/
1395
- __CUDA_FP16_DECL__ __half __hmax(const __half a, const __half b)
1396
- {
1397
- #if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800)
1398
- __BINARY_OP_HALF_MACRO(max)
1399
- #else
1400
- const float fa = __half2float(a);
1401
- const float fb = __half2float(b);
1402
- float fr;
1403
- asm("{max.f32 %0,%1,%2;\n}"
1404
- :"=f"(fr) : "f"(fa), "f"(fb));
1405
- const __half hr = __float2half(fr);
1406
- return hr;
1407
- #endif
1408
- }
1409
- __CUDA_FP16_DECL__ __half __hmin(const __half a, const __half b)
1410
- {
1411
- #if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800)
1412
- __BINARY_OP_HALF_MACRO(min)
1413
- #else
1414
- const float fa = __half2float(a);
1415
- const float fb = __half2float(b);
1416
- float fr;
1417
- asm("{min.f32 %0,%1,%2;\n}"
1418
- :"=f"(fr) : "f"(fa), "f"(fb));
1419
- const __half hr = __float2half(fr);
1420
- return hr;
1421
- #endif
1422
- }
1423
-
1424
- /******************************************************************************
1425
- * __half2 arithmetic *
1426
- ******************************************************************************/
1427
- __CUDA_FP16_DECL__ __half2 __hmax2(const __half2 a, const __half2 b)
1428
- {
1429
- #if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800)
1430
- __BINARY_OP_HALF2_MACRO(max)
1431
- #else
1432
- const float2 fa = __half22float2(a);
1433
- const float2 fb = __half22float2(b);
1434
- float2 fr;
1435
- asm("{max.f32 %0,%1,%2;\n}"
1436
- :"=f"(fr.x) : "f"(fa.x), "f"(fb.x));
1437
- asm("{max.f32 %0,%1,%2;\n}"
1438
- :"=f"(fr.y) : "f"(fa.y), "f"(fb.y));
1439
- const __half2 hr = __float22half2_rn(fr);
1440
- return hr;
1441
- #endif
1442
- }
1443
- __CUDA_FP16_DECL__ __half2 __hmin2(const __half2 a, const __half2 b)
1444
- {
1445
- #if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800)
1446
- __BINARY_OP_HALF2_MACRO(min)
1447
- #else
1448
- const float2 fa = __half22float2(a);
1449
- const float2 fb = __half22float2(b);
1450
- float2 fr;
1451
- asm("{min.f32 %0,%1,%2;\n}"
1452
- :"=f"(fr.x) : "f"(fa.x), "f"(fb.x));
1453
- asm("{min.f32 %0,%1,%2;\n}"
1454
- :"=f"(fr.y) : "f"(fa.y), "f"(fb.y));
1455
- const __half2 hr = __float22half2_rn(fr);
1456
- return hr;
1457
- #endif
1458
- }
1459
-
1460
-
1461
- #if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 300)
1462
- /******************************************************************************
1463
- * __half, __half2 warp shuffle *
1464
- ******************************************************************************/
1465
- #define __SHUFFLE_HALF2_MACRO(name) /* do */ {\
1466
- __half2 r; \
1467
- asm volatile ("{" __CUDA_FP16_STRINGIFY(name) " %0,%1,%2,%3;\n}" \
1468
- :"=r"(__HALF2_TO_UI(r)): "r"(__HALF2_TO_CUI(var)), "r"(delta), "r"(c)); \
1469
- return r; \
1470
- } /* while(0) */
1471
-
1472
- #define __SHUFFLE_SYNC_HALF2_MACRO(name) /* do */ {\
1473
- __half2 r; \
1474
- asm volatile ("{" __CUDA_FP16_STRINGIFY(name) " %0,%1,%2,%3,%4;\n}" \
1475
- :"=r"(__HALF2_TO_UI(r)): "r"(__HALF2_TO_CUI(var)), "r"(delta), "r"(c), "r"(mask)); \
1476
- return r; \
1477
- } /* while(0) */
1478
-
1479
- #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700
1480
-
1481
- __CUDA_FP16_DECL__ __half2 __shfl(const __half2 var, const int delta, const int width)
1482
- {
1483
- unsigned int warp_size;
1484
- asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size));
1485
- const unsigned int c = ((warp_size - static_cast<unsigned>(width)) << 8U) | 0x1fU;
1486
- __SHUFFLE_HALF2_MACRO(shfl.idx.b32)
1487
- }
1488
- __CUDA_FP16_DECL__ __half2 __shfl_up(const __half2 var, const unsigned int delta, const int width)
1489
- {
1490
- unsigned int warp_size;
1491
- asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size));
1492
- const unsigned int c = (warp_size - static_cast<unsigned>(width)) << 8U;
1493
- __SHUFFLE_HALF2_MACRO(shfl.up.b32)
1494
- }
1495
- __CUDA_FP16_DECL__ __half2 __shfl_down(const __half2 var, const unsigned int delta, const int width)
1496
- {
1497
- unsigned int warp_size;
1498
- asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size));
1499
- const unsigned int c = ((warp_size - static_cast<unsigned>(width)) << 8U) | 0x1fU;
1500
- __SHUFFLE_HALF2_MACRO(shfl.down.b32)
1501
- }
1502
- __CUDA_FP16_DECL__ __half2 __shfl_xor(const __half2 var, const int delta, const int width)
1503
- {
1504
- unsigned int warp_size;
1505
- asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size));
1506
- const unsigned int c = ((warp_size - static_cast<unsigned>(width)) << 8U) | 0x1fU;
1507
- __SHUFFLE_HALF2_MACRO(shfl.bfly.b32)
1508
- }
1509
-
1510
- #endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 */
1511
-
1512
- __CUDA_FP16_DECL__ __half2 __shfl_sync(const unsigned mask, const __half2 var, const int delta, const int width)
1513
- {
1514
- unsigned int warp_size;
1515
- asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size));
1516
- const unsigned int c = ((warp_size - static_cast<unsigned>(width)) << 8U) | 0x1fU;
1517
- __SHUFFLE_SYNC_HALF2_MACRO(shfl.sync.idx.b32)
1518
- }
1519
- __CUDA_FP16_DECL__ __half2 __shfl_up_sync(const unsigned mask, const __half2 var, const unsigned int delta, const int width)
1520
- {
1521
- unsigned int warp_size;
1522
- asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size));
1523
- const unsigned int c = (warp_size - static_cast<unsigned>(width)) << 8U;
1524
- __SHUFFLE_SYNC_HALF2_MACRO(shfl.sync.up.b32)
1525
- }
1526
- __CUDA_FP16_DECL__ __half2 __shfl_down_sync(const unsigned mask, const __half2 var, const unsigned int delta, const int width)
1527
- {
1528
- unsigned int warp_size;
1529
- asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size));
1530
- const unsigned int c = ((warp_size - static_cast<unsigned>(width)) << 8U) | 0x1fU;
1531
- __SHUFFLE_SYNC_HALF2_MACRO(shfl.sync.down.b32)
1532
- }
1533
- __CUDA_FP16_DECL__ __half2 __shfl_xor_sync(const unsigned mask, const __half2 var, const int delta, const int width)
1534
- {
1535
- unsigned int warp_size;
1536
- asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size));
1537
- const unsigned int c = ((warp_size - static_cast<unsigned>(width)) << 8U) | 0x1fU;
1538
- __SHUFFLE_SYNC_HALF2_MACRO(shfl.sync.bfly.b32)
1539
- }
1540
-
1541
- #undef __SHUFFLE_HALF2_MACRO
1542
- #undef __SHUFFLE_SYNC_HALF2_MACRO
1543
-
1544
- #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700
1545
-
1546
- __CUDA_FP16_DECL__ __half __shfl(const __half var, const int delta, const int width)
1547
- {
1548
- const __half2 temp1 = __halves2half2(var, var);
1549
- const __half2 temp2 = __shfl(temp1, delta, width);
1550
- return __low2half(temp2);
1551
- }
1552
- __CUDA_FP16_DECL__ __half __shfl_up(const __half var, const unsigned int delta, const int width)
1553
- {
1554
- const __half2 temp1 = __halves2half2(var, var);
1555
- const __half2 temp2 = __shfl_up(temp1, delta, width);
1556
- return __low2half(temp2);
1557
- }
1558
- __CUDA_FP16_DECL__ __half __shfl_down(const __half var, const unsigned int delta, const int width)
1559
- {
1560
- const __half2 temp1 = __halves2half2(var, var);
1561
- const __half2 temp2 = __shfl_down(temp1, delta, width);
1562
- return __low2half(temp2);
1563
- }
1564
- __CUDA_FP16_DECL__ __half __shfl_xor(const __half var, const int delta, const int width)
1565
- {
1566
- const __half2 temp1 = __halves2half2(var, var);
1567
- const __half2 temp2 = __shfl_xor(temp1, delta, width);
1568
- return __low2half(temp2);
1569
- }
1570
-
1571
- #endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 */
1572
-
1573
- __CUDA_FP16_DECL__ __half __shfl_sync(const unsigned mask, const __half var, const int delta, const int width)
1574
- {
1575
- const __half2 temp1 = __halves2half2(var, var);
1576
- const __half2 temp2 = __shfl_sync(mask, temp1, delta, width);
1577
- return __low2half(temp2);
1578
- }
1579
- __CUDA_FP16_DECL__ __half __shfl_up_sync(const unsigned mask, const __half var, const unsigned int delta, const int width)
1580
- {
1581
- const __half2 temp1 = __halves2half2(var, var);
1582
- const __half2 temp2 = __shfl_up_sync(mask, temp1, delta, width);
1583
- return __low2half(temp2);
1584
- }
1585
- __CUDA_FP16_DECL__ __half __shfl_down_sync(const unsigned mask, const __half var, const unsigned int delta, const int width)
1586
- {
1587
- const __half2 temp1 = __halves2half2(var, var);
1588
- const __half2 temp2 = __shfl_down_sync(mask, temp1, delta, width);
1589
- return __low2half(temp2);
1590
- }
1591
- __CUDA_FP16_DECL__ __half __shfl_xor_sync(const unsigned mask, const __half var, const int delta, const int width)
1592
- {
1593
- const __half2 temp1 = __halves2half2(var, var);
1594
- const __half2 temp2 = __shfl_xor_sync(mask, temp1, delta, width);
1595
- return __low2half(temp2);
1596
- }
1597
-
1598
- #endif /*!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 300)*/
1599
- /******************************************************************************
1600
- * __half and __half2 __ldg,__ldcg,__ldca,__ldcs *
1601
- ******************************************************************************/
1602
-
1603
- #if defined(__cplusplus) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 320))
1604
- #if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)
1605
- #define __LDG_PTR "l"
1606
- #else
1607
- #define __LDG_PTR "r"
1608
- #endif /*(defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)*/
1609
- __CUDA_FP16_DECL__ __half2 __ldg(const __half2 *const ptr)
1610
- {
1611
- __half2 ret;
1612
- asm ("ld.global.nc.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr));
1613
- return ret;
1614
- }
1615
- __CUDA_FP16_DECL__ __half __ldg(const __half *const ptr)
1616
- {
1617
- __half ret;
1618
- asm ("ld.global.nc.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr));
1619
- return ret;
1620
- }
1621
- __CUDA_FP16_DECL__ __half2 __ldcg(const __half2 *const ptr)
1622
- {
1623
- __half2 ret;
1624
- asm ("ld.global.cg.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr));
1625
- return ret;
1626
- }
1627
- __CUDA_FP16_DECL__ __half __ldcg(const __half *const ptr)
1628
- {
1629
- __half ret;
1630
- asm ("ld.global.cg.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr));
1631
- return ret;
1632
- }
1633
- __CUDA_FP16_DECL__ __half2 __ldca(const __half2 *const ptr)
1634
- {
1635
- __half2 ret;
1636
- asm ("ld.global.ca.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr));
1637
- return ret;
1638
- }
1639
- __CUDA_FP16_DECL__ __half __ldca(const __half *const ptr)
1640
- {
1641
- __half ret;
1642
- asm ("ld.global.ca.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr));
1643
- return ret;
1644
- }
1645
- __CUDA_FP16_DECL__ __half2 __ldcs(const __half2 *const ptr)
1646
- {
1647
- __half2 ret;
1648
- asm ("ld.global.cs.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr));
1649
- return ret;
1650
- }
1651
- __CUDA_FP16_DECL__ __half __ldcs(const __half *const ptr)
1652
- {
1653
- __half ret;
1654
- asm ("ld.global.cs.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr));
1655
- return ret;
1656
- }
1657
- __CUDA_FP16_DECL__ __half2 __ldlu(const __half2 *const ptr)
1658
- {
1659
- __half2 ret;
1660
- asm ("ld.global.lu.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr) : "memory");
1661
- return ret;
1662
- }
1663
- __CUDA_FP16_DECL__ __half __ldlu(const __half *const ptr)
1664
- {
1665
- __half ret;
1666
- asm ("ld.global.lu.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr) : "memory");
1667
- return ret;
1668
- }
1669
- __CUDA_FP16_DECL__ __half2 __ldcv(const __half2 *const ptr)
1670
- {
1671
- __half2 ret;
1672
- asm ("ld.global.cv.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr) : "memory");
1673
- return ret;
1674
- }
1675
- __CUDA_FP16_DECL__ __half __ldcv(const __half *const ptr)
1676
- {
1677
- __half ret;
1678
- asm ("ld.global.cv.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr) : "memory");
1679
- return ret;
1680
- }
1681
- __CUDA_FP16_DECL__ void __stwb(__half2 *const ptr, const __half2 value)
1682
- {
1683
- asm ("st.global.wb.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__HALF2_TO_CUI(value)) : "memory");
1684
- }
1685
- __CUDA_FP16_DECL__ void __stwb(__half *const ptr, const __half value)
1686
- {
1687
- asm ("st.global.wb.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__HALF_TO_CUS(value)) : "memory");
1688
- }
1689
- __CUDA_FP16_DECL__ void __stcg(__half2 *const ptr, const __half2 value)
1690
- {
1691
- asm ("st.global.cg.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__HALF2_TO_CUI(value)) : "memory");
1692
- }
1693
- __CUDA_FP16_DECL__ void __stcg(__half *const ptr, const __half value)
1694
- {
1695
- asm ("st.global.cg.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__HALF_TO_CUS(value)) : "memory");
1696
- }
1697
- __CUDA_FP16_DECL__ void __stcs(__half2 *const ptr, const __half2 value)
1698
- {
1699
- asm ("st.global.cs.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__HALF2_TO_CUI(value)) : "memory");
1700
- }
1701
- __CUDA_FP16_DECL__ void __stcs(__half *const ptr, const __half value)
1702
- {
1703
- asm ("st.global.cs.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__HALF_TO_CUS(value)) : "memory");
1704
- }
1705
- __CUDA_FP16_DECL__ void __stwt(__half2 *const ptr, const __half2 value)
1706
- {
1707
- asm ("st.global.wt.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__HALF2_TO_CUI(value)) : "memory");
1708
- }
1709
- __CUDA_FP16_DECL__ void __stwt(__half *const ptr, const __half value)
1710
- {
1711
- asm ("st.global.wt.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__HALF_TO_CUS(value)) : "memory");
1712
- }
1713
- #undef __LDG_PTR
1714
- #endif /*defined(__cplusplus) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 320))*/
1715
- #if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
1716
- /******************************************************************************
1717
- * __half2 comparison *
1718
- ******************************************************************************/
1719
- #define __COMPARISON_OP_HALF2_MACRO(name) /* do */ {\
1720
- __half2 val; \
1721
- asm( "{ " __CUDA_FP16_STRINGIFY(name) ".f16x2.f16x2 %0,%1,%2;\n}" \
1722
- :"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)),"r"(__HALF2_TO_CUI(b))); \
1723
- return val; \
1724
- } /* while(0) */
1725
- __CUDA_FP16_DECL__ __half2 __heq2(const __half2 a, const __half2 b)
1726
- {
1727
- __COMPARISON_OP_HALF2_MACRO(set.eq)
1728
- }
1729
- __CUDA_FP16_DECL__ __half2 __hne2(const __half2 a, const __half2 b)
1730
- {
1731
- __COMPARISON_OP_HALF2_MACRO(set.ne)
1732
- }
1733
- __CUDA_FP16_DECL__ __half2 __hle2(const __half2 a, const __half2 b)
1734
- {
1735
- __COMPARISON_OP_HALF2_MACRO(set.le)
1736
- }
1737
- __CUDA_FP16_DECL__ __half2 __hge2(const __half2 a, const __half2 b)
1738
- {
1739
- __COMPARISON_OP_HALF2_MACRO(set.ge)
1740
- }
1741
- __CUDA_FP16_DECL__ __half2 __hlt2(const __half2 a, const __half2 b)
1742
- {
1743
- __COMPARISON_OP_HALF2_MACRO(set.lt)
1744
- }
1745
- __CUDA_FP16_DECL__ __half2 __hgt2(const __half2 a, const __half2 b)
1746
- {
1747
- __COMPARISON_OP_HALF2_MACRO(set.gt)
1748
- }
1749
- __CUDA_FP16_DECL__ __half2 __hequ2(const __half2 a, const __half2 b)
1750
- {
1751
- __COMPARISON_OP_HALF2_MACRO(set.equ)
1752
- }
1753
- __CUDA_FP16_DECL__ __half2 __hneu2(const __half2 a, const __half2 b)
1754
- {
1755
- __COMPARISON_OP_HALF2_MACRO(set.neu)
1756
- }
1757
- __CUDA_FP16_DECL__ __half2 __hleu2(const __half2 a, const __half2 b)
1758
- {
1759
- __COMPARISON_OP_HALF2_MACRO(set.leu)
1760
- }
1761
- __CUDA_FP16_DECL__ __half2 __hgeu2(const __half2 a, const __half2 b)
1762
- {
1763
- __COMPARISON_OP_HALF2_MACRO(set.geu)
1764
- }
1765
- __CUDA_FP16_DECL__ __half2 __hltu2(const __half2 a, const __half2 b)
1766
- {
1767
- __COMPARISON_OP_HALF2_MACRO(set.ltu)
1768
- }
1769
- __CUDA_FP16_DECL__ __half2 __hgtu2(const __half2 a, const __half2 b)
1770
- {
1771
- __COMPARISON_OP_HALF2_MACRO(set.gtu)
1772
- }
1773
- #undef __COMPARISON_OP_HALF2_MACRO
1774
- #define __BOOL_COMPARISON_OP_HALF2_MACRO(name) /* do */ {\
1775
- __half2 val; \
1776
- bool retval; \
1777
- asm( "{ " __CUDA_FP16_STRINGIFY(name) ".f16x2.f16x2 %0,%1,%2;\n}" \
1778
- :"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)),"r"(__HALF2_TO_CUI(b))); \
1779
- if (__HALF2_TO_CUI(val) == 0x3C003C00U) {\
1780
- retval = true; \
1781
- } else { \
1782
- retval = false; \
1783
- }\
1784
- return retval;\
1785
- } /* while(0) */
1786
- __CUDA_FP16_DECL__ bool __hbeq2(const __half2 a, const __half2 b)
1787
- {
1788
- __BOOL_COMPARISON_OP_HALF2_MACRO(set.eq)
1789
- }
1790
- __CUDA_FP16_DECL__ bool __hbne2(const __half2 a, const __half2 b)
1791
- {
1792
- __BOOL_COMPARISON_OP_HALF2_MACRO(set.ne)
1793
- }
1794
- __CUDA_FP16_DECL__ bool __hble2(const __half2 a, const __half2 b)
1795
- {
1796
- __BOOL_COMPARISON_OP_HALF2_MACRO(set.le)
1797
- }
1798
- __CUDA_FP16_DECL__ bool __hbge2(const __half2 a, const __half2 b)
1799
- {
1800
- __BOOL_COMPARISON_OP_HALF2_MACRO(set.ge)
1801
- }
1802
- __CUDA_FP16_DECL__ bool __hblt2(const __half2 a, const __half2 b)
1803
- {
1804
- __BOOL_COMPARISON_OP_HALF2_MACRO(set.lt)
1805
- }
1806
- __CUDA_FP16_DECL__ bool __hbgt2(const __half2 a, const __half2 b)
1807
- {
1808
- __BOOL_COMPARISON_OP_HALF2_MACRO(set.gt)
1809
- }
1810
- __CUDA_FP16_DECL__ bool __hbequ2(const __half2 a, const __half2 b)
1811
- {
1812
- __BOOL_COMPARISON_OP_HALF2_MACRO(set.equ)
1813
- }
1814
- __CUDA_FP16_DECL__ bool __hbneu2(const __half2 a, const __half2 b)
1815
- {
1816
- __BOOL_COMPARISON_OP_HALF2_MACRO(set.neu)
1817
- }
1818
- __CUDA_FP16_DECL__ bool __hbleu2(const __half2 a, const __half2 b)
1819
- {
1820
- __BOOL_COMPARISON_OP_HALF2_MACRO(set.leu)
1821
- }
1822
- __CUDA_FP16_DECL__ bool __hbgeu2(const __half2 a, const __half2 b)
1823
- {
1824
- __BOOL_COMPARISON_OP_HALF2_MACRO(set.geu)
1825
- }
1826
- __CUDA_FP16_DECL__ bool __hbltu2(const __half2 a, const __half2 b)
1827
- {
1828
- __BOOL_COMPARISON_OP_HALF2_MACRO(set.ltu)
1829
- }
1830
- __CUDA_FP16_DECL__ bool __hbgtu2(const __half2 a, const __half2 b)
1831
- {
1832
- __BOOL_COMPARISON_OP_HALF2_MACRO(set.gtu)
1833
- }
1834
- #undef __BOOL_COMPARISON_OP_HALF2_MACRO
1835
- /******************************************************************************
1836
- * __half comparison *
1837
- ******************************************************************************/
1838
- #define __COMPARISON_OP_HALF_MACRO(name) /* do */ {\
1839
- unsigned short val; \
1840
- asm( "{ .reg .pred __$temp3;\n" \
1841
- " setp." __CUDA_FP16_STRINGIFY(name) ".f16 __$temp3, %1, %2;\n" \
1842
- " selp.u16 %0, 1, 0, __$temp3;}" \
1843
- : "=h"(val) : "h"(__HALF_TO_CUS(a)), "h"(__HALF_TO_CUS(b))); \
1844
- return (val != 0U) ? true : false; \
1845
- } /* while(0) */
1846
- __CUDA_FP16_DECL__ bool __heq(const __half a, const __half b)
1847
- {
1848
- __COMPARISON_OP_HALF_MACRO(eq)
1849
- }
1850
- __CUDA_FP16_DECL__ bool __hne(const __half a, const __half b)
1851
- {
1852
- __COMPARISON_OP_HALF_MACRO(ne)
1853
- }
1854
- __CUDA_FP16_DECL__ bool __hle(const __half a, const __half b)
1855
- {
1856
- __COMPARISON_OP_HALF_MACRO(le)
1857
- }
1858
- __CUDA_FP16_DECL__ bool __hge(const __half a, const __half b)
1859
- {
1860
- __COMPARISON_OP_HALF_MACRO(ge)
1861
- }
1862
- __CUDA_FP16_DECL__ bool __hlt(const __half a, const __half b)
1863
- {
1864
- __COMPARISON_OP_HALF_MACRO(lt)
1865
- }
1866
- __CUDA_FP16_DECL__ bool __hgt(const __half a, const __half b)
1867
- {
1868
- __COMPARISON_OP_HALF_MACRO(gt)
1869
- }
1870
- __CUDA_FP16_DECL__ bool __hequ(const __half a, const __half b)
1871
- {
1872
- __COMPARISON_OP_HALF_MACRO(equ)
1873
- }
1874
- __CUDA_FP16_DECL__ bool __hneu(const __half a, const __half b)
1875
- {
1876
- __COMPARISON_OP_HALF_MACRO(neu)
1877
- }
1878
- __CUDA_FP16_DECL__ bool __hleu(const __half a, const __half b)
1879
- {
1880
- __COMPARISON_OP_HALF_MACRO(leu)
1881
- }
1882
- __CUDA_FP16_DECL__ bool __hgeu(const __half a, const __half b)
1883
- {
1884
- __COMPARISON_OP_HALF_MACRO(geu)
1885
- }
1886
- __CUDA_FP16_DECL__ bool __hltu(const __half a, const __half b)
1887
- {
1888
- __COMPARISON_OP_HALF_MACRO(ltu)
1889
- }
1890
- __CUDA_FP16_DECL__ bool __hgtu(const __half a, const __half b)
1891
- {
1892
- __COMPARISON_OP_HALF_MACRO(gtu)
1893
- }
1894
- #undef __COMPARISON_OP_HALF_MACRO
1895
- /******************************************************************************
1896
- * __half2 arithmetic *
1897
- ******************************************************************************/
1898
- __CUDA_FP16_DECL__ __half2 __hadd2(const __half2 a, const __half2 b)
1899
- {
1900
- __BINARY_OP_HALF2_MACRO(add)
1901
- }
1902
- __CUDA_FP16_DECL__ __half2 __hsub2(const __half2 a, const __half2 b)
1903
- {
1904
- __BINARY_OP_HALF2_MACRO(sub)
1905
- }
1906
- __CUDA_FP16_DECL__ __half2 __hmul2(const __half2 a, const __half2 b)
1907
- {
1908
- __BINARY_OP_HALF2_MACRO(mul)
1909
- }
1910
- __CUDA_FP16_DECL__ __half2 __hadd2_sat(const __half2 a, const __half2 b)
1911
- {
1912
- __BINARY_OP_HALF2_MACRO(add.sat)
1913
- }
1914
- __CUDA_FP16_DECL__ __half2 __hsub2_sat(const __half2 a, const __half2 b)
1915
- {
1916
- __BINARY_OP_HALF2_MACRO(sub.sat)
1917
- }
1918
- __CUDA_FP16_DECL__ __half2 __hmul2_sat(const __half2 a, const __half2 b)
1919
- {
1920
- __BINARY_OP_HALF2_MACRO(mul.sat)
1921
- }
1922
- __CUDA_FP16_DECL__ __half2 __hadd2_rn(const __half2 a, const __half2 b)
1923
- {
1924
- __BINARY_OP_HALF2_MACRO(add.rn)
1925
- }
1926
- __CUDA_FP16_DECL__ __half2 __hsub2_rn(const __half2 a, const __half2 b)
1927
- {
1928
- __BINARY_OP_HALF2_MACRO(sub.rn)
1929
- }
1930
- __CUDA_FP16_DECL__ __half2 __hmul2_rn(const __half2 a, const __half2 b)
1931
- {
1932
- __BINARY_OP_HALF2_MACRO(mul.rn)
1933
- }
1934
- __CUDA_FP16_DECL__ __half2 __hfma2(const __half2 a, const __half2 b, const __half2 c)
1935
- {
1936
- __TERNARY_OP_HALF2_MACRO(fma.rn)
1937
- }
1938
- __CUDA_FP16_DECL__ __half2 __hfma2_sat(const __half2 a, const __half2 b, const __half2 c)
1939
- {
1940
- __TERNARY_OP_HALF2_MACRO(fma.rn.sat)
1941
- }
1942
- __CUDA_FP16_DECL__ __half2 __h2div(const __half2 a, const __half2 b) {
1943
- __half ha = __low2half(a);
1944
- __half hb = __low2half(b);
1945
-
1946
- const __half v1 = __hdiv(ha, hb);
1947
-
1948
- ha = __high2half(a);
1949
- hb = __high2half(b);
1950
-
1951
- const __half v2 = __hdiv(ha, hb);
1952
-
1953
- return __halves2half2(v1, v2);
1954
- }
1955
- /******************************************************************************
1956
- * __half arithmetic *
1957
- ******************************************************************************/
1958
- __CUDA_FP16_DECL__ __half __hadd(const __half a, const __half b)
1959
- {
1960
- __BINARY_OP_HALF_MACRO(add)
1961
- }
1962
- __CUDA_FP16_DECL__ __half __hsub(const __half a, const __half b)
1963
- {
1964
- __BINARY_OP_HALF_MACRO(sub)
1965
- }
1966
- __CUDA_FP16_DECL__ __half __hmul(const __half a, const __half b)
1967
- {
1968
- __BINARY_OP_HALF_MACRO(mul)
1969
- }
1970
- __CUDA_FP16_DECL__ __half __hadd_sat(const __half a, const __half b)
1971
- {
1972
- __BINARY_OP_HALF_MACRO(add.sat)
1973
- }
1974
- __CUDA_FP16_DECL__ __half __hsub_sat(const __half a, const __half b)
1975
- {
1976
- __BINARY_OP_HALF_MACRO(sub.sat)
1977
- }
1978
- __CUDA_FP16_DECL__ __half __hmul_sat(const __half a, const __half b)
1979
- {
1980
- __BINARY_OP_HALF_MACRO(mul.sat)
1981
- }
1982
- __CUDA_FP16_DECL__ __half __hadd_rn(const __half a, const __half b)
1983
- {
1984
- __BINARY_OP_HALF_MACRO(add.rn)
1985
- }
1986
- __CUDA_FP16_DECL__ __half __hsub_rn(const __half a, const __half b)
1987
- {
1988
- __BINARY_OP_HALF_MACRO(sub.rn)
1989
- }
1990
- __CUDA_FP16_DECL__ __half __hmul_rn(const __half a, const __half b)
1991
- {
1992
- __BINARY_OP_HALF_MACRO(mul.rn)
1993
- }
1994
- __CUDA_FP16_DECL__ __half __hfma(const __half a, const __half b, const __half c)
1995
- {
1996
- __TERNARY_OP_HALF_MACRO(fma.rn)
1997
- }
1998
- __CUDA_FP16_DECL__ __half __hfma_sat(const __half a, const __half b, const __half c)
1999
- {
2000
- __TERNARY_OP_HALF_MACRO(fma.rn.sat)
2001
- }
2002
- __CUDA_FP16_DECL__ __half __hdiv(const __half a, const __half b) {
2003
- __half v;
2004
- __half abs;
2005
- __half den;
2006
- __HALF_TO_US(den) = 0x008FU;
2007
-
2008
- float rcp;
2009
- const float fa = __half2float(a);
2010
- const float fb = __half2float(b);
2011
-
2012
- asm("{rcp.approx.ftz.f32 %0, %1;\n}" :"=f"(rcp) : "f"(fb));
2013
-
2014
- float fv = rcp * fa;
2015
-
2016
- v = __float2half(fv);
2017
- __HALF_TO_US(abs) = static_cast<unsigned short>(static_cast<unsigned int>(__HALF_TO_CUS(v)) & 0x00007FFFU);
2018
- if (__hlt(abs, den) && (!(__HALF_TO_CUS(abs) == 0x0000U))) {
2019
- const float err = __fmaf_rn(-fb, fv, fa);
2020
- fv = __fmaf_rn(rcp, err, fv);
2021
- v = __float2half(fv);
2022
- }
2023
- return v;
2024
- }
2025
-
2026
- /******************************************************************************
2027
- * __half2 functions *
2028
- ******************************************************************************/
2029
- #define __SPEC_CASE2(i,r, spc, ulp) \
2030
- "{.reg.b32 spc, ulp, p;\n"\
2031
- " mov.b32 spc," __CUDA_FP16_STRINGIFY(spc) ";\n"\
2032
- " mov.b32 ulp," __CUDA_FP16_STRINGIFY(ulp) ";\n"\
2033
- " set.eq.f16x2.f16x2 p," __CUDA_FP16_STRINGIFY(i) ", spc;\n"\
2034
- " fma.rn.f16x2 " __CUDA_FP16_STRINGIFY(r) ",p,ulp," __CUDA_FP16_STRINGIFY(r) ";\n}\n"
2035
- #define __SPEC_CASE(i,r, spc, ulp) \
2036
- "{.reg.b16 spc, ulp, p;\n"\
2037
- " mov.b16 spc," __CUDA_FP16_STRINGIFY(spc) ";\n"\
2038
- " mov.b16 ulp," __CUDA_FP16_STRINGIFY(ulp) ";\n"\
2039
- " set.eq.f16.f16 p," __CUDA_FP16_STRINGIFY(i) ", spc;\n"\
2040
- " fma.rn.f16 " __CUDA_FP16_STRINGIFY(r) ",p,ulp," __CUDA_FP16_STRINGIFY(r) ";\n}\n"
2041
- #define __APPROX_FCAST(fun) /* do */ {\
2042
- __half val;\
2043
- asm("{.reg.b32 f; \n"\
2044
- " .reg.b16 r; \n"\
2045
- " mov.b16 r,%1; \n"\
2046
- " cvt.f32.f16 f,r; \n"\
2047
- " " __CUDA_FP16_STRINGIFY(fun) ".approx.ftz.f32 f,f; \n"\
2048
- " cvt.rn.f16.f32 r,f; \n"\
2049
- " mov.b16 %0,r; \n"\
2050
- "}": "=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)));\
2051
- return val;\
2052
- } /* while(0) */
2053
- #define __APPROX_FCAST2(fun) /* do */ {\
2054
- __half2 val;\
2055
- asm("{.reg.b16 hl, hu; \n"\
2056
- " .reg.b32 fl, fu; \n"\
2057
- " mov.b32 {hl, hu}, %1; \n"\
2058
- " cvt.f32.f16 fl, hl; \n"\
2059
- " cvt.f32.f16 fu, hu; \n"\
2060
- " " __CUDA_FP16_STRINGIFY(fun) ".approx.ftz.f32 fl, fl; \n"\
2061
- " " __CUDA_FP16_STRINGIFY(fun) ".approx.ftz.f32 fu, fu; \n"\
2062
- " cvt.rn.f16.f32 hl, fl; \n"\
2063
- " cvt.rn.f16.f32 hu, fu; \n"\
2064
- " mov.b32 %0, {hl, hu}; \n"\
2065
- "}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); \
2066
- return val;\
2067
- } /* while(0) */
2068
- static __device__ __forceinline__ float __float_simpl_sinf(float a);
2069
- static __device__ __forceinline__ float __float_simpl_cosf(float a);
2070
- __CUDA_FP16_DECL__ __half hsin(const __half a) {
2071
- const float sl = __float_simpl_sinf(__half2float(a));
2072
- __half r = __float2half_rn(sl);
2073
- asm("{\n\t"
2074
- " .reg.b16 i,r,t; \n\t"
2075
- " mov.b16 r, %0; \n\t"
2076
- " mov.b16 i, %1; \n\t"
2077
- " and.b16 t, r, 0x8000U; \n\t"
2078
- " abs.f16 r, r; \n\t"
2079
- " abs.f16 i, i; \n\t"
2080
- __SPEC_CASE(i, r, 0X32B3U, 0x0800U)
2081
- __SPEC_CASE(i, r, 0X5CB0U, 0x9000U)
2082
- " or.b16 r,r,t; \n\t"
2083
- " mov.b16 %0, r; \n"
2084
- "}\n" : "+h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a)));
2085
- return r;
2086
- }
2087
- __CUDA_FP16_DECL__ __half2 h2sin(const __half2 a) {
2088
- const float sl = __float_simpl_sinf(__half2float(a.x));
2089
- const float sh = __float_simpl_sinf(__half2float(a.y));
2090
- __half2 r = __floats2half2_rn(sl, sh);
2091
- asm("{\n\t"
2092
- " .reg.b32 i,r,t; \n\t"
2093
- " mov.b32 r, %0; \n\t"
2094
- " mov.b32 i, %1; \n\t"
2095
- " and.b32 t, r, 0x80008000U; \n\t"
2096
- " abs.f16x2 r, r; \n\t"
2097
- " abs.f16x2 i, i; \n\t"
2098
- __SPEC_CASE2(i, r, 0X32B332B3U, 0x08000800U)
2099
- __SPEC_CASE2(i, r, 0X5CB05CB0U, 0x90009000U)
2100
- " or.b32 r, r, t; \n\t"
2101
- " mov.b32 %0, r; \n"
2102
- "}\n" : "+r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a)));
2103
- return r;
2104
- }
2105
- __CUDA_FP16_DECL__ __half hcos(const __half a) {
2106
- const float cl = __float_simpl_cosf(__half2float(a));
2107
- __half r = __float2half_rn(cl);
2108
- asm("{\n\t"
2109
- " .reg.b16 i,r; \n\t"
2110
- " mov.b16 r, %0; \n\t"
2111
- " mov.b16 i, %1; \n\t"
2112
- " abs.f16 i, i; \n\t"
2113
- __SPEC_CASE(i, r, 0X2B7CU, 0x1000U)
2114
- " mov.b16 %0, r; \n"
2115
- "}\n" : "+h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a)));
2116
- return r;
2117
- }
2118
- __CUDA_FP16_DECL__ __half2 h2cos(const __half2 a) {
2119
- const float cl = __float_simpl_cosf(__half2float(a.x));
2120
- const float ch = __float_simpl_cosf(__half2float(a.y));
2121
- __half2 r = __floats2half2_rn(cl, ch);
2122
- asm("{\n\t"
2123
- " .reg.b32 i,r; \n\t"
2124
- " mov.b32 r, %0; \n\t"
2125
- " mov.b32 i, %1; \n\t"
2126
- " abs.f16x2 i, i; \n\t"
2127
- __SPEC_CASE2(i, r, 0X2B7C2B7CU, 0x10001000U)
2128
- " mov.b32 %0, r; \n"
2129
- "}\n" : "+r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a)));
2130
- return r;
2131
- }
2132
- static __device__ __forceinline__ float __internal_trig_reduction_kernel(const float a, unsigned int *const quadrant)
2133
- {
2134
- const float ar = __fmaf_rn(a, 0.636619772F, 12582912.0F);
2135
- const unsigned q = __float_as_uint(ar);
2136
- const float j = __fsub_rn(ar, 12582912.0F);
2137
- float t = __fmaf_rn(j, -1.5707962512969971e+000F, a);
2138
- t = __fmaf_rn(j, -7.5497894158615964e-008F, t);
2139
- *quadrant = q;
2140
- return t;
2141
- }
2142
- static __device__ __forceinline__ float __internal_sin_cos_kernel(const float x, const unsigned int i)
2143
- {
2144
- float z;
2145
- const float x2 = x*x;
2146
- float a8;
2147
- float a6;
2148
- float a4;
2149
- float a2;
2150
- float a1;
2151
- float a0;
2152
-
2153
- if ((i & 1U) != 0U) {
2154
- // cos
2155
- a8 = 2.44331571e-5F;
2156
- a6 = -1.38873163e-3F;
2157
- a4 = 4.16666457e-2F;
2158
- a2 = -5.00000000e-1F;
2159
- a1 = x2;
2160
- a0 = 1.0F;
2161
- }
2162
- else {
2163
- // sin
2164
- a8 = -1.95152959e-4F;
2165
- a6 = 8.33216087e-3F;
2166
- a4 = -1.66666546e-1F;
2167
- a2 = 0.0F;
2168
- a1 = x;
2169
- a0 = x;
2170
- }
2171
-
2172
- z = __fmaf_rn(a8, x2, a6);
2173
- z = __fmaf_rn(z, x2, a4);
2174
- z = __fmaf_rn(z, x2, a2);
2175
- z = __fmaf_rn(z, a1, a0);
2176
-
2177
- if ((i & 2U) != 0U) {
2178
- z = -z;
2179
- }
2180
- return z;
2181
- }
2182
- static __device__ __forceinline__ float __float_simpl_sinf(float a)
2183
- {
2184
- float z;
2185
- unsigned i;
2186
- a = __internal_trig_reduction_kernel(a, &i);
2187
- z = __internal_sin_cos_kernel(a, i);
2188
- return z;
2189
- }
2190
- static __device__ __forceinline__ float __float_simpl_cosf(float a)
2191
- {
2192
- float z;
2193
- unsigned i;
2194
- a = __internal_trig_reduction_kernel(a, &i);
2195
- z = __internal_sin_cos_kernel(a, (i & 0x3U) + 1U);
2196
- return z;
2197
- }
2198
-
2199
- __CUDA_FP16_DECL__ __half hexp(const __half a) {
2200
- __half val;
2201
- asm("{.reg.b32 f, C, nZ; \n"
2202
- " .reg.b16 h,r; \n"
2203
- " mov.b16 h,%1; \n"
2204
- " cvt.f32.f16 f,h; \n"
2205
- " mov.b32 C, 0x3fb8aa3bU; \n"
2206
- " mov.b32 nZ, 0x80000000U;\n"
2207
- " fma.rn.f32 f,f,C,nZ; \n"
2208
- " ex2.approx.ftz.f32 f,f; \n"
2209
- " cvt.rn.f16.f32 r,f; \n"
2210
- __SPEC_CASE(h, r, 0X1F79U, 0x9400U)
2211
- __SPEC_CASE(h, r, 0X25CFU, 0x9400U)
2212
- __SPEC_CASE(h, r, 0XC13BU, 0x0400U)
2213
- __SPEC_CASE(h, r, 0XC1EFU, 0x0200U)
2214
- " mov.b16 %0,r; \n"
2215
- "}": "=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)));
2216
- return val;
2217
- }
2218
- __CUDA_FP16_DECL__ __half2 h2exp(const __half2 a) {
2219
- __half2 val;
2220
- asm("{.reg.b16 hl, hu; \n"
2221
- " .reg.b32 h,r,fl,fu,C,nZ; \n"
2222
- " mov.b32 {hl, hu}, %1; \n"
2223
- " mov.b32 h, %1; \n"
2224
- " cvt.f32.f16 fl, hl; \n"
2225
- " cvt.f32.f16 fu, hu; \n"
2226
- " mov.b32 C, 0x3fb8aa3bU; \n"
2227
- " mov.b32 nZ, 0x80000000U;\n"
2228
- " fma.rn.f32 fl,fl,C,nZ; \n"
2229
- " fma.rn.f32 fu,fu,C,nZ; \n"
2230
- " ex2.approx.ftz.f32 fl, fl; \n"
2231
- " ex2.approx.ftz.f32 fu, fu; \n"
2232
- " cvt.rn.f16.f32 hl, fl; \n"
2233
- " cvt.rn.f16.f32 hu, fu; \n"
2234
- " mov.b32 r, {hl, hu}; \n"
2235
- __SPEC_CASE2(h, r, 0X1F791F79U, 0x94009400U)
2236
- __SPEC_CASE2(h, r, 0X25CF25CFU, 0x94009400U)
2237
- __SPEC_CASE2(h, r, 0XC13BC13BU, 0x04000400U)
2238
- __SPEC_CASE2(h, r, 0XC1EFC1EFU, 0x02000200U)
2239
- " mov.b32 %0, r; \n"
2240
- "}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)));
2241
- return val;
2242
- }
2243
- __CUDA_FP16_DECL__ __half hexp2(const __half a) {
2244
- __half val;
2245
- asm("{.reg.b32 f, ULP; \n"
2246
- " .reg.b16 r; \n"
2247
- " mov.b16 r,%1; \n"
2248
- " cvt.f32.f16 f,r; \n"
2249
- " ex2.approx.ftz.f32 f,f; \n"
2250
- " mov.b32 ULP, 0x33800000U;\n"
2251
- " fma.rn.f32 f,f,ULP,f; \n"
2252
- " cvt.rn.f16.f32 r,f; \n"
2253
- " mov.b16 %0,r; \n"
2254
- "}": "=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)));
2255
- return val;
2256
- }
2257
- __CUDA_FP16_DECL__ __half2 h2exp2(const __half2 a) {
2258
- __half2 val;
2259
- asm("{.reg.b16 hl, hu; \n"
2260
- " .reg.b32 fl, fu, ULP; \n"
2261
- " mov.b32 {hl, hu}, %1; \n"
2262
- " cvt.f32.f16 fl, hl; \n"
2263
- " cvt.f32.f16 fu, hu; \n"
2264
- " ex2.approx.ftz.f32 fl, fl; \n"
2265
- " ex2.approx.ftz.f32 fu, fu; \n"
2266
- " mov.b32 ULP, 0x33800000U;\n"
2267
- " fma.rn.f32 fl,fl,ULP,fl; \n"
2268
- " fma.rn.f32 fu,fu,ULP,fu; \n"
2269
- " cvt.rn.f16.f32 hl, fl; \n"
2270
- " cvt.rn.f16.f32 hu, fu; \n"
2271
- " mov.b32 %0, {hl, hu}; \n"
2272
- "}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)));
2273
- return val;
2274
- }
2275
- __CUDA_FP16_DECL__ __half hexp10(const __half a) {
2276
- __half val;
2277
- asm("{.reg.b16 h,r; \n"
2278
- " .reg.b32 f, C, nZ; \n"
2279
- " mov.b16 h, %1; \n"
2280
- " cvt.f32.f16 f, h; \n"
2281
- " mov.b32 C, 0x40549A78U; \n"
2282
- " mov.b32 nZ, 0x80000000U;\n"
2283
- " fma.rn.f32 f,f,C,nZ; \n"
2284
- " ex2.approx.ftz.f32 f, f; \n"
2285
- " cvt.rn.f16.f32 r, f; \n"
2286
- __SPEC_CASE(h, r, 0x34DEU, 0x9800U)
2287
- __SPEC_CASE(h, r, 0x9766U, 0x9000U)
2288
- __SPEC_CASE(h, r, 0x9972U, 0x1000U)
2289
- __SPEC_CASE(h, r, 0xA5C4U, 0x1000U)
2290
- __SPEC_CASE(h, r, 0xBF0AU, 0x8100U)
2291
- " mov.b16 %0, r; \n"
2292
- "}":"=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)));
2293
- return val;
2294
- }
2295
- __CUDA_FP16_DECL__ __half2 h2exp10(const __half2 a) {
2296
- __half2 val;
2297
- asm("{.reg.b16 hl, hu; \n"
2298
- " .reg.b32 h,r,fl,fu,C,nZ; \n"
2299
- " mov.b32 {hl, hu}, %1; \n"
2300
- " mov.b32 h, %1; \n"
2301
- " cvt.f32.f16 fl, hl; \n"
2302
- " cvt.f32.f16 fu, hu; \n"
2303
- " mov.b32 C, 0x40549A78U; \n"
2304
- " mov.b32 nZ, 0x80000000U;\n"
2305
- " fma.rn.f32 fl,fl,C,nZ; \n"
2306
- " fma.rn.f32 fu,fu,C,nZ; \n"
2307
- " ex2.approx.ftz.f32 fl, fl; \n"
2308
- " ex2.approx.ftz.f32 fu, fu; \n"
2309
- " cvt.rn.f16.f32 hl, fl; \n"
2310
- " cvt.rn.f16.f32 hu, fu; \n"
2311
- " mov.b32 r, {hl, hu}; \n"
2312
- __SPEC_CASE2(h, r, 0x34DE34DEU, 0x98009800U)
2313
- __SPEC_CASE2(h, r, 0x97669766U, 0x90009000U)
2314
- __SPEC_CASE2(h, r, 0x99729972U, 0x10001000U)
2315
- __SPEC_CASE2(h, r, 0xA5C4A5C4U, 0x10001000U)
2316
- __SPEC_CASE2(h, r, 0xBF0ABF0AU, 0x81008100U)
2317
- " mov.b32 %0, r; \n"
2318
- "}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)));
2319
- return val;
2320
- }
2321
- __CUDA_FP16_DECL__ __half hlog2(const __half a) {
2322
- __half val;
2323
- asm("{.reg.b16 h, r; \n"
2324
- " .reg.b32 f; \n"
2325
- " mov.b16 h, %1; \n"
2326
- " cvt.f32.f16 f, h; \n"
2327
- " lg2.approx.ftz.f32 f, f; \n"
2328
- " cvt.rn.f16.f32 r, f; \n"
2329
- __SPEC_CASE(r, r, 0xA2E2U, 0x8080U)
2330
- __SPEC_CASE(r, r, 0xBF46U, 0x9400U)
2331
- " mov.b16 %0, r; \n"
2332
- "}":"=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)));
2333
- return val;
2334
- }
2335
- __CUDA_FP16_DECL__ __half2 h2log2(const __half2 a) {
2336
- __half2 val;
2337
- asm("{.reg.b16 hl, hu; \n"
2338
- " .reg.b32 fl, fu, r, p; \n"
2339
- " mov.b32 {hl, hu}, %1; \n"
2340
- " cvt.f32.f16 fl, hl; \n"
2341
- " cvt.f32.f16 fu, hu; \n"
2342
- " lg2.approx.ftz.f32 fl, fl; \n"
2343
- " lg2.approx.ftz.f32 fu, fu; \n"
2344
- " cvt.rn.f16.f32 hl, fl; \n"
2345
- " cvt.rn.f16.f32 hu, fu; \n"
2346
- " mov.b32 r, {hl, hu}; \n"
2347
- __SPEC_CASE2(r, r, 0xA2E2A2E2U, 0x80808080U)
2348
- __SPEC_CASE2(r, r, 0xBF46BF46U, 0x94009400U)
2349
- " mov.b32 %0, r; \n"
2350
- "}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)));
2351
- return val;
2352
- }
2353
- __CUDA_FP16_DECL__ __half hlog(const __half a) {
2354
- __half val;
2355
- asm("{.reg.b32 f, C; \n"
2356
- " .reg.b16 r,h; \n"
2357
- " mov.b16 h,%1; \n"
2358
- " cvt.f32.f16 f,h; \n"
2359
- " lg2.approx.ftz.f32 f,f; \n"
2360
- " mov.b32 C, 0x3f317218U; \n"
2361
- " mul.f32 f,f,C; \n"
2362
- " cvt.rn.f16.f32 r,f; \n"
2363
- __SPEC_CASE(h, r, 0X160DU, 0x9C00U)
2364
- __SPEC_CASE(h, r, 0X3BFEU, 0x8010U)
2365
- __SPEC_CASE(h, r, 0X3C0BU, 0x8080U)
2366
- __SPEC_CASE(h, r, 0X6051U, 0x1C00U)
2367
- " mov.b16 %0,r; \n"
2368
- "}": "=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)));
2369
- return val;
2370
- }
2371
- __CUDA_FP16_DECL__ __half2 h2log(const __half2 a) {
2372
- __half2 val;
2373
- asm("{.reg.b16 hl, hu; \n"
2374
- " .reg.b32 r, fl, fu, C, h; \n"
2375
- " mov.b32 {hl, hu}, %1; \n"
2376
- " mov.b32 h, %1; \n"
2377
- " cvt.f32.f16 fl, hl; \n"
2378
- " cvt.f32.f16 fu, hu; \n"
2379
- " lg2.approx.ftz.f32 fl, fl; \n"
2380
- " lg2.approx.ftz.f32 fu, fu; \n"
2381
- " mov.b32 C, 0x3f317218U; \n"
2382
- " mul.f32 fl,fl,C; \n"
2383
- " mul.f32 fu,fu,C; \n"
2384
- " cvt.rn.f16.f32 hl, fl; \n"
2385
- " cvt.rn.f16.f32 hu, fu; \n"
2386
- " mov.b32 r, {hl, hu}; \n"
2387
- __SPEC_CASE2(h, r, 0X160D160DU, 0x9C009C00U)
2388
- __SPEC_CASE2(h, r, 0X3BFE3BFEU, 0x80108010U)
2389
- __SPEC_CASE2(h, r, 0X3C0B3C0BU, 0x80808080U)
2390
- __SPEC_CASE2(h, r, 0X60516051U, 0x1C001C00U)
2391
- " mov.b32 %0, r; \n"
2392
- "}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)));
2393
- return val;
2394
- }
2395
- __CUDA_FP16_DECL__ __half hlog10(const __half a) {
2396
- __half val;
2397
- asm("{.reg.b16 h, r; \n"
2398
- " .reg.b32 f, C; \n"
2399
- " mov.b16 h, %1; \n"
2400
- " cvt.f32.f16 f, h; \n"
2401
- " lg2.approx.ftz.f32 f, f; \n"
2402
- " mov.b32 C, 0x3E9A209BU; \n"
2403
- " mul.f32 f,f,C; \n"
2404
- " cvt.rn.f16.f32 r, f; \n"
2405
- __SPEC_CASE(h, r, 0x338FU, 0x1000U)
2406
- __SPEC_CASE(h, r, 0x33F8U, 0x9000U)
2407
- __SPEC_CASE(h, r, 0x57E1U, 0x9800U)
2408
- __SPEC_CASE(h, r, 0x719DU, 0x9C00U)
2409
- " mov.b16 %0, r; \n"
2410
- "}":"=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)));
2411
- return val;
2412
- }
2413
- __CUDA_FP16_DECL__ __half2 h2log10(const __half2 a) {
2414
- __half2 val;
2415
- asm("{.reg.b16 hl, hu; \n"
2416
- " .reg.b32 r, fl, fu, C, h; \n"
2417
- " mov.b32 {hl, hu}, %1; \n"
2418
- " mov.b32 h, %1; \n"
2419
- " cvt.f32.f16 fl, hl; \n"
2420
- " cvt.f32.f16 fu, hu; \n"
2421
- " lg2.approx.ftz.f32 fl, fl; \n"
2422
- " lg2.approx.ftz.f32 fu, fu; \n"
2423
- " mov.b32 C, 0x3E9A209BU; \n"
2424
- " mul.f32 fl,fl,C; \n"
2425
- " mul.f32 fu,fu,C; \n"
2426
- " cvt.rn.f16.f32 hl, fl; \n"
2427
- " cvt.rn.f16.f32 hu, fu; \n"
2428
- " mov.b32 r, {hl, hu}; \n"
2429
- __SPEC_CASE2(h, r, 0x338F338FU, 0x10001000U)
2430
- __SPEC_CASE2(h, r, 0x33F833F8U, 0x90009000U)
2431
- __SPEC_CASE2(h, r, 0x57E157E1U, 0x98009800U)
2432
- __SPEC_CASE2(h, r, 0x719D719DU, 0x9C009C00U)
2433
- " mov.b32 %0, r; \n"
2434
- "}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)));
2435
- return val;
2436
- }
2437
- #undef __SPEC_CASE2
2438
- #undef __SPEC_CASE
2439
- __CUDA_FP16_DECL__ __half2 h2rcp(const __half2 a) {
2440
- __APPROX_FCAST2(rcp)
2441
- }
2442
- __CUDA_FP16_DECL__ __half hrcp(const __half a) {
2443
- __APPROX_FCAST(rcp)
2444
- }
2445
- __CUDA_FP16_DECL__ __half2 h2rsqrt(const __half2 a) {
2446
- __APPROX_FCAST2(rsqrt)
2447
- }
2448
- __CUDA_FP16_DECL__ __half hrsqrt(const __half a) {
2449
- __APPROX_FCAST(rsqrt)
2450
- }
2451
- __CUDA_FP16_DECL__ __half2 h2sqrt(const __half2 a) {
2452
- __APPROX_FCAST2(sqrt)
2453
- }
2454
- __CUDA_FP16_DECL__ __half hsqrt(const __half a) {
2455
- __APPROX_FCAST(sqrt)
2456
- }
2457
- #undef __APPROX_FCAST
2458
- #undef __APPROX_FCAST2
2459
- __CUDA_FP16_DECL__ __half2 __hisnan2(const __half2 a)
2460
- {
2461
- __half2 r;
2462
- asm("{set.nan.f16x2.f16x2 %0,%1,%2;\n}"
2463
- :"=r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a)), "r"(__HALF2_TO_CUI(a)));
2464
- return r;
2465
- }
2466
- __CUDA_FP16_DECL__ bool __hisnan(const __half a)
2467
- {
2468
- __half r;
2469
- asm("{set.nan.f16.f16 %0,%1,%2;\n}"
2470
- :"=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a)), "h"(__HALF_TO_CUS(a)));
2471
- return __HALF_TO_CUS(r) != 0U;
2472
- }
2473
- __CUDA_FP16_DECL__ __half2 __hneg2(const __half2 a)
2474
- {
2475
- __half2 r;
2476
- asm("{neg.f16x2 %0,%1;\n}"
2477
- :"=r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a)));
2478
- return r;
2479
- }
2480
- __CUDA_FP16_DECL__ __half __hneg(const __half a)
2481
- {
2482
- __half r;
2483
- asm("{neg.f16 %0,%1;\n}"
2484
- :"=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a)));
2485
- return r;
2486
- }
2487
- __CUDA_FP16_DECL__ __half2 __habs2(const __half2 a)
2488
- {
2489
- __half2 r;
2490
- asm("{abs.f16x2 %0,%1;\n}"
2491
- :"=r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a)));
2492
- return r;
2493
- }
2494
- __CUDA_FP16_DECL__ __half __habs(const __half a)
2495
- {
2496
- __half r;
2497
- asm("{abs.f16 %0,%1;\n}"
2498
- :"=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a)));
2499
- return r;
2500
- }
2501
-
2502
- __CUDA_FP16_DECL__ __half2 __hcmadd(const __half2 a, const __half2 b, const __half2 c)
2503
- {
2504
- // fast version of complex multiply-accumulate
2505
- // (a.re, a.im) * (b.re, b.im) + (c.re, c.im)
2506
- // acc.re = (c.re + a.re*b.re) - a.im*b.im
2507
- // acc.im = (c.im + a.re*b.im) + a.im*b.re
2508
- __half real_tmp = __hfma(a.x, b.x, c.x);
2509
- __half img_tmp = __hfma(a.x, b.y, c.y);
2510
- real_tmp = __hfma(__hneg(a.y), b.y, real_tmp);
2511
- img_tmp = __hfma(a.y, b.x, img_tmp);
2512
- return make_half2(real_tmp, img_tmp);
2513
- }
2514
-
2515
- #endif /*!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)*/
2516
-
2517
- #if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800)
2518
- __CUDA_FP16_DECL__ __half __hmax_nan(const __half a, const __half b)
2519
- {
2520
- __BINARY_OP_HALF_MACRO(max.NaN)
2521
- }
2522
- __CUDA_FP16_DECL__ __half __hmin_nan(const __half a, const __half b)
2523
- {
2524
- __BINARY_OP_HALF_MACRO(min.NaN)
2525
- }
2526
- __CUDA_FP16_DECL__ __half __hfma_relu(const __half a, const __half b, const __half c)
2527
- {
2528
- __TERNARY_OP_HALF_MACRO(fma.rn.relu)
2529
- }
2530
-
2531
- __CUDA_FP16_DECL__ __half2 __hmax2_nan(const __half2 a, const __half2 b)
2532
- {
2533
- __BINARY_OP_HALF2_MACRO(max.NaN)
2534
- }
2535
- __CUDA_FP16_DECL__ __half2 __hmin2_nan(const __half2 a, const __half2 b)
2536
- {
2537
- __BINARY_OP_HALF2_MACRO(min.NaN)
2538
- }
2539
- __CUDA_FP16_DECL__ __half2 __hfma2_relu(const __half2 a, const __half2 b, const __half2 c)
2540
- {
2541
- __TERNARY_OP_HALF2_MACRO(fma.rn.relu)
2542
- }
2543
- #endif /*!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 800)*/
2544
-
2545
- /* Define __PTR for atomicAdd prototypes below, undef after done */
2546
- #if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)
2547
- #define __PTR "l"
2548
- #else
2549
- #define __PTR "r"
2550
- #endif /*(defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)*/
2551
-
2552
- #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
2553
-
2554
- __CUDA_FP16_DECL__ __half2 atomicAdd(__half2 *const address, const __half2 val) {
2555
- __half2 r;
2556
- asm volatile ("{ atom.add.noftz.f16x2 %0,[%1],%2; }\n"
2557
- : "=r"(__HALF2_TO_UI(r)) : __PTR(address), "r"(__HALF2_TO_CUI(val))
2558
- : "memory");
2559
- return r;
2560
- }
2561
-
2562
- #endif /*!defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600*/
2563
-
2564
- #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
2565
-
2566
- __CUDA_FP16_DECL__ __half atomicAdd(__half *const address, const __half val) {
2567
- __half r;
2568
- asm volatile ("{ atom.add.noftz.f16 %0,[%1],%2; }\n"
2569
- : "=h"(__HALF_TO_US(r))
2570
- : __PTR(address), "h"(__HALF_TO_CUS(val))
2571
- : "memory");
2572
- return r;
2573
- }
2574
-
2575
- #endif /*!defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700*/
2576
-
2577
- #undef __PTR
2578
-
2579
- #undef __CUDA_FP16_DECL__
2580
- #endif /* defined(__CUDACC__) */
2581
- #endif /* defined(__cplusplus) */
2582
-
2583
- #undef __TERNARY_OP_HALF2_MACRO
2584
- #undef __TERNARY_OP_HALF_MACRO
2585
- #undef __BINARY_OP_HALF2_MACRO
2586
- #undef __BINARY_OP_HALF_MACRO
2587
-
2588
- #undef __CUDA_HOSTDEVICE_FP16_DECL__
2589
- #undef __CUDA_FP16_DECL__
2590
-
2591
- #undef __HALF_TO_US
2592
- #undef __HALF_TO_CUS
2593
- #undef __HALF2_TO_UI
2594
- #undef __HALF2_TO_CUI
2595
-
2596
- /* Define first-class types "half" and "half2", unless user specifies otherwise via "#define CUDA_NO_HALF" */
2597
- /* C cannot ever have these types defined here, because __half and __half2 are C++ classes */
2598
- #if defined(__cplusplus) && !defined(CUDA_NO_HALF)
2599
- typedef __half half;
2600
- typedef __half2 half2;
2601
- // for consistency with __nv_bfloat16
2602
- typedef __half __nv_half;
2603
- typedef __half2 __nv_half2;
2604
- typedef __half_raw __nv_half_raw;
2605
- typedef __half2_raw __nv_half2_raw;
2606
- typedef __half nv_half;
2607
- typedef __half2 nv_half2;
2608
- #endif /* defined(__cplusplus) && !defined(CUDA_NO_HALF) */
2609
-
2610
- #if defined(__CPP_VERSION_AT_LEAST_11_FP16)
2611
- #undef __CPP_VERSION_AT_LEAST_11_FP16
2612
- #endif /* defined(__CPP_VERSION_AT_LEAST_11_FP16) */
2613
-
2614
- #endif /* end of include guard: __CUDA_FP16_HPP__ */