numba-cuda 0.9.0__py3-none-any.whl → 0.10.0__py3-none-any.whl
This diff shows the changes between publicly available package versions as they were released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- numba_cuda/VERSION +1 -1
- numba_cuda/numba/cuda/compiler.py +14 -1
- numba_cuda/numba/cuda/cuda_bf16.py +5155 -0
- numba_cuda/numba/cuda/cuda_paths.py +2 -0
- numba_cuda/numba/cuda/cudadecl.py +0 -42
- numba_cuda/numba/cuda/cudadrv/linkable_code.py +11 -2
- numba_cuda/numba/cuda/cudadrv/nvrtc.py +10 -3
- numba_cuda/numba/cuda/cudaimpl.py +0 -63
- numba_cuda/numba/cuda/debuginfo.py +92 -2
- numba_cuda/numba/cuda/decorators.py +13 -1
- numba_cuda/numba/cuda/device_init.py +4 -5
- numba_cuda/numba/cuda/extending.py +54 -0
- numba_cuda/numba/cuda/include/11/cuda_bf16.h +3749 -0
- numba_cuda/numba/cuda/include/11/cuda_bf16.hpp +2683 -0
- numba_cuda/numba/cuda/{cuda_fp16.h → include/11/cuda_fp16.h} +550 -387
- numba_cuda/numba/cuda/{cuda_fp16.hpp → include/11/cuda_fp16.hpp} +465 -316
- numba_cuda/numba/cuda/include/12/cuda_bf16.h +5118 -0
- numba_cuda/numba/cuda/include/12/cuda_bf16.hpp +3865 -0
- numba_cuda/numba/cuda/include/12/cuda_fp16.h +5363 -0
- numba_cuda/numba/cuda/include/12/cuda_fp16.hpp +3483 -0
- numba_cuda/numba/cuda/intrinsic_wrapper.py +0 -39
- numba_cuda/numba/cuda/intrinsics.py +172 -1
- numba_cuda/numba/cuda/lowering.py +43 -0
- numba_cuda/numba/cuda/stubs.py +0 -11
- numba_cuda/numba/cuda/target.py +28 -0
- numba_cuda/numba/cuda/tests/cudapy/extensions_usecases.py +4 -2
- numba_cuda/numba/cuda/tests/cudapy/test_array_args.py +1 -1
- numba_cuda/numba/cuda/tests/cudapy/test_bfloat16_bindings.py +257 -0
- numba_cuda/numba/cuda/tests/cudapy/test_blackscholes.py +1 -1
- numba_cuda/numba/cuda/tests/cudapy/test_debuginfo.py +46 -0
- numba_cuda/numba/cuda/tests/cudapy/test_enums.py +18 -0
- numba_cuda/numba/cuda/tests/cudapy/test_extending.py +4 -2
- numba_cuda/numba/cuda/tests/cudapy/test_inline.py +59 -0
- numba_cuda/numba/cuda/tests/cudapy/test_laplace.py +1 -1
- numba_cuda/numba/cuda/tests/cudapy/test_warp_ops.py +50 -5
- numba_cuda/numba/cuda/vector_types.py +3 -1
- numba_cuda/numba/cuda/vectorizers.py +1 -1
- {numba_cuda-0.9.0.dist-info → numba_cuda-0.10.0.dist-info}/METADATA +1 -1
- {numba_cuda-0.9.0.dist-info → numba_cuda-0.10.0.dist-info}/RECORD +42 -32
- {numba_cuda-0.9.0.dist-info → numba_cuda-0.10.0.dist-info}/WHEEL +1 -1
- {numba_cuda-0.9.0.dist-info → numba_cuda-0.10.0.dist-info}/licenses/LICENSE +0 -0
- {numba_cuda-0.9.0.dist-info → numba_cuda-0.10.0.dist-info}/top_level.txt +0 -0
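The headline additions in this release are the bfloat16 bindings (cuda_bf16.py, test_bfloat16_bindings.py) and the vendored CUDA 11/12 cuda_bf16 and cuda_fp16 headers that back them. For orientation, below is a minimal CUDA C++ sketch, not taken from this package, of the kind of device-side half-precision arithmetic those headers implement: the __half type, the __float2half/__half2float conversions, and operator+ forwarding to __hadd, all of which appear in the cuda_fp16.hpp hunk that follows. The kernel name, launch configuration, and buffer sizes are illustrative only.

```cuda
// Illustrative only -- not part of the numba-cuda diff. Build with e.g.: nvcc -arch=sm_70 half_demo.cu
#include <cuda_fp16.h>   // same header family as the one vendored under numba_cuda/numba/cuda/include/
#include <cstdio>

__global__ void add_half(const __half *x, const __half *y, __half *out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        // operator+ on __half forwards to __hadd (see the operator overloads in the hunk below)
        out[i] = x[i] + y[i];
    }
}

int main()
{
    const int n = 8;
    __half hx[n], hy[n], hout[n];
    for (int i = 0; i < n; ++i) {
        hx[i] = __float2half(0.5f);                    // host-side float -> half conversion
        hy[i] = __float2half(static_cast<float>(i));
    }

    __half *dx, *dy, *dout;
    cudaMalloc(&dx, n * sizeof(__half));
    cudaMalloc(&dy, n * sizeof(__half));
    cudaMalloc(&dout, n * sizeof(__half));
    cudaMemcpy(dx, hx, n * sizeof(__half), cudaMemcpyHostToDevice);
    cudaMemcpy(dy, hy, n * sizeof(__half), cudaMemcpyHostToDevice);

    add_half<<<1, n>>>(dx, dy, dout, n);
    cudaMemcpy(hout, dout, n * sizeof(__half), cudaMemcpyDeviceToHost);

    for (int i = 0; i < n; ++i) {
        printf("%g\n", __half2float(hout[i]));         // half -> float for printing
    }

    cudaFree(dx); cudaFree(dy); cudaFree(dout);
    return 0;
}
```

The Python-level bindings added in cuda_bf16.py presumably expose analogous operations to @cuda.jit kernels; test_bfloat16_bindings.py in the file list above exercises that surface.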
numba_cuda/numba/cuda/include/12/cuda_fp16.hpp
@@ -0,0 +1,3483 @@
[Large hunk not rendered in full: the 3,483 added lines are a verbatim copy of NVIDIA's cuda_fp16.hpp (CUDA 12 series). The portion rendered before truncation, file lines 1-1319, comprises the NVIDIA copyright and license notice; the __CUDA_FP16_HPP__ include guard and the IF_DEVICE_OR_CUDACC macro; the __BINARY_OP_HALF(2)_MACRO and __TERNARY_OP_HALF(2)_MACRO inline-PTX helpers; the CUDART_*_FP16 constants; the __half and __half2 assignment, conversion, arithmetic, comparison, and increment/decrement operator overloads (forwarding to __hadd, __hsub, __hmul, __hdiv, __heq, and related intrinsics); the host-side __internal_float2half and __internal_half2float helpers; __double2half and the rounding-mode variants of the float <-> half conversions (__float2half_rn/_rz/_rd/_ru, __float2half2_rn, __floats2half2_rn, __half2float, __low2float, __high2float); make_half2, __float22half2_rn, and __half22float2; and the integer <-> half conversions (__half2char_rz, __half2uchar_rz, __half2short*, __half2ushort*, __half2int*, __half2uint*, __half2ll*, __half2ull*, __int2half_r*, __short2half_r*, __uint2half_r*, __ushort2half_r*, __ull2half_r*), cutting off inside __ull2half_ru.]
|
1320
|
+
,
|
1321
|
+
const float f = static_cast<float>(i);
|
1322
|
+
h = __float2half_ru(f);
|
1323
|
+
)
|
1324
|
+
return h;
|
1325
|
+
}
|
1326
|
+
#if defined(__CUDACC__) || defined(_NVHPC_CUDA)
|
1327
|
+
__CUDA_FP16_DECL__ long long int __half2ll_rn(const __half h)
|
1328
|
+
{
|
1329
|
+
long long int i;
|
1330
|
+
asm("cvt.rni.s64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h)));
|
1331
|
+
return i;
|
1332
|
+
}
|
1333
|
+
__CUDA_FP16_DECL__ long long int __half2ll_rd(const __half h)
|
1334
|
+
{
|
1335
|
+
long long int i;
|
1336
|
+
asm("cvt.rmi.s64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h)));
|
1337
|
+
return i;
|
1338
|
+
}
|
1339
|
+
__CUDA_FP16_DECL__ long long int __half2ll_ru(const __half h)
|
1340
|
+
{
|
1341
|
+
long long int i;
|
1342
|
+
asm("cvt.rpi.s64.f16 %0, %1;" : "=l"(i) : "h"(__HALF_TO_CUS(h)));
|
1343
|
+
return i;
|
1344
|
+
}
|
1345
|
+
#endif /* defined(__CUDACC__) || defined(_NVHPC_CUDA) */
|
1346
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __ll2half_rn(const long long int i)
|
1347
|
+
{
|
1348
|
+
__half h;
|
1349
|
+
NV_IF_ELSE_TARGET(NV_IS_DEVICE,
|
1350
|
+
asm("cvt.rn.f16.s64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i));
|
1351
|
+
,
|
1352
|
+
// double-rounding is not a problem here: if integer
|
1353
|
+
// has more than 24 bits, it is already too large to
|
1354
|
+
// be represented in half precision, and result will
|
1355
|
+
// be infinity.
|
1356
|
+
const float f = static_cast<float>(i);
|
1357
|
+
h = __float2half_rn(f);
|
1358
|
+
)
|
1359
|
+
return h;
|
1360
|
+
}
|
1361
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __ll2half_rz(const long long int i)
|
1362
|
+
{
|
1363
|
+
__half h;
|
1364
|
+
NV_IF_ELSE_TARGET(NV_IS_DEVICE,
|
1365
|
+
asm("cvt.rz.f16.s64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i));
|
1366
|
+
,
|
1367
|
+
const float f = static_cast<float>(i);
|
1368
|
+
h = __float2half_rz(f);
|
1369
|
+
)
|
1370
|
+
return h;
|
1371
|
+
}
|
1372
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __ll2half_rd(const long long int i)
|
1373
|
+
{
|
1374
|
+
__half h;
|
1375
|
+
NV_IF_ELSE_TARGET(NV_IS_DEVICE,
|
1376
|
+
asm("cvt.rm.f16.s64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i));
|
1377
|
+
,
|
1378
|
+
const float f = static_cast<float>(i);
|
1379
|
+
h = __float2half_rd(f);
|
1380
|
+
)
|
1381
|
+
return h;
|
1382
|
+
}
|
1383
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __ll2half_ru(const long long int i)
|
1384
|
+
{
|
1385
|
+
__half h;
|
1386
|
+
NV_IF_ELSE_TARGET(NV_IS_DEVICE,
|
1387
|
+
asm("cvt.rp.f16.s64 %0, %1;" : "=h"(__HALF_TO_US(h)) : "l"(i));
|
1388
|
+
,
|
1389
|
+
const float f = static_cast<float>(i);
|
1390
|
+
h = __float2half_ru(f);
|
1391
|
+
)
|
1392
|
+
return h;
|
1393
|
+
}
|
1394
|
+
#if defined(__CUDACC__) || defined(_NVHPC_CUDA)
|
1395
|
+
__CUDA_FP16_DECL__ __half htrunc(const __half h)
|
1396
|
+
{
|
1397
|
+
__half r;
|
1398
|
+
asm("cvt.rzi.f16.f16 %0, %1;" : "=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(h)));
|
1399
|
+
return r;
|
1400
|
+
}
|
1401
|
+
__CUDA_FP16_DECL__ __half hceil(const __half h)
|
1402
|
+
{
|
1403
|
+
__half r;
|
1404
|
+
asm("cvt.rpi.f16.f16 %0, %1;" : "=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(h)));
|
1405
|
+
return r;
|
1406
|
+
}
|
1407
|
+
__CUDA_FP16_DECL__ __half hfloor(const __half h)
|
1408
|
+
{
|
1409
|
+
__half r;
|
1410
|
+
asm("cvt.rmi.f16.f16 %0, %1;" : "=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(h)));
|
1411
|
+
return r;
|
1412
|
+
}
|
1413
|
+
__CUDA_FP16_DECL__ __half hrint(const __half h)
|
1414
|
+
{
|
1415
|
+
__half r;
|
1416
|
+
asm("cvt.rni.f16.f16 %0, %1;" : "=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(h)));
|
1417
|
+
return r;
|
1418
|
+
}
|
1419
|
+
|
1420
|
+
__CUDA_FP16_DECL__ __half2 h2trunc(const __half2 h)
|
1421
|
+
{
|
1422
|
+
__half2 val;
|
1423
|
+
asm("{.reg .f16 low,high;\n"
|
1424
|
+
" mov.b32 {low,high}, %1;\n"
|
1425
|
+
" cvt.rzi.f16.f16 low, low;\n"
|
1426
|
+
" cvt.rzi.f16.f16 high, high;\n"
|
1427
|
+
" mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(h)));
|
1428
|
+
return val;
|
1429
|
+
}
|
1430
|
+
__CUDA_FP16_DECL__ __half2 h2ceil(const __half2 h)
|
1431
|
+
{
|
1432
|
+
__half2 val;
|
1433
|
+
asm("{.reg .f16 low,high;\n"
|
1434
|
+
" mov.b32 {low,high}, %1;\n"
|
1435
|
+
" cvt.rpi.f16.f16 low, low;\n"
|
1436
|
+
" cvt.rpi.f16.f16 high, high;\n"
|
1437
|
+
" mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(h)));
|
1438
|
+
return val;
|
1439
|
+
}
|
1440
|
+
__CUDA_FP16_DECL__ __half2 h2floor(const __half2 h)
|
1441
|
+
{
|
1442
|
+
__half2 val;
|
1443
|
+
asm("{.reg .f16 low,high;\n"
|
1444
|
+
" mov.b32 {low,high}, %1;\n"
|
1445
|
+
" cvt.rmi.f16.f16 low, low;\n"
|
1446
|
+
" cvt.rmi.f16.f16 high, high;\n"
|
1447
|
+
" mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(h)));
|
1448
|
+
return val;
|
1449
|
+
}
|
1450
|
+
__CUDA_FP16_DECL__ __half2 h2rint(const __half2 h)
|
1451
|
+
{
|
1452
|
+
__half2 val;
|
1453
|
+
asm("{.reg .f16 low,high;\n"
|
1454
|
+
" mov.b32 {low,high}, %1;\n"
|
1455
|
+
" cvt.rni.f16.f16 low, low;\n"
|
1456
|
+
" cvt.rni.f16.f16 high, high;\n"
|
1457
|
+
" mov.b32 %0, {low,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(h)));
|
1458
|
+
return val;
|
1459
|
+
}
|
1460
|
+
#endif /* defined(__CUDACC__) || defined(_NVHPC_CUDA) */
|
1461
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __lows2half2(const __half2 a, const __half2 b)
|
1462
|
+
{
|
1463
|
+
__half2 val;
|
1464
|
+
NV_IF_ELSE_TARGET(NV_IS_DEVICE,
|
1465
|
+
asm("{.reg .f16 alow,ahigh,blow,bhigh;\n"
|
1466
|
+
" mov.b32 {alow,ahigh}, %1;\n"
|
1467
|
+
" mov.b32 {blow,bhigh}, %2;\n"
|
1468
|
+
" mov.b32 %0, {alow,blow};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)), "r"(__HALF2_TO_CUI(b)));
|
1469
|
+
,
|
1470
|
+
val.x = a.x;
|
1471
|
+
val.y = b.x;
|
1472
|
+
)
|
1473
|
+
return val;
|
1474
|
+
}
|
1475
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __highs2half2(const __half2 a, const __half2 b)
|
1476
|
+
{
|
1477
|
+
__half2 val;
|
1478
|
+
NV_IF_ELSE_TARGET(NV_IS_DEVICE,
|
1479
|
+
asm("{.reg .f16 alow,ahigh,blow,bhigh;\n"
|
1480
|
+
" mov.b32 {alow,ahigh}, %1;\n"
|
1481
|
+
" mov.b32 {blow,bhigh}, %2;\n"
|
1482
|
+
" mov.b32 %0, {ahigh,bhigh};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)), "r"(__HALF2_TO_CUI(b)));
|
1483
|
+
,
|
1484
|
+
val.x = a.y;
|
1485
|
+
val.y = b.y;
|
1486
|
+
)
|
1487
|
+
return val;
|
1488
|
+
}
|
1489
|
+
+__CUDA_HOSTDEVICE_FP16_DECL__ __half __low2half(const __half2 a)
+{
+    __half ret;
+    NV_IF_ELSE_TARGET(NV_IS_DEVICE,
+        asm("{.reg .f16 low,high;\n"
+            " mov.b32 {low,high}, %1;\n"
+            " mov.b16 %0, low;}" : "=h"(__HALF_TO_US(ret)) : "r"(__HALF2_TO_CUI(a)));
+    ,
+        ret = a.x;
+    )
+    return ret;
+}
+__CUDA_HOSTDEVICE_FP16_DECL__ int __hisinf(const __half a)
+{
+    int retval;
+    const __half_raw araw = __half_raw(a);
+    if (araw.x == 0xFC00U) {
+        retval = -1;
+    } else if (araw.x == 0x7C00U) {
+        retval = 1;
+    } else {
+        retval = 0;
+    }
+    return retval;
+}
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __low2half2(const __half2 a)
|
1515
|
+
{
|
1516
|
+
__half2 val;
|
1517
|
+
NV_IF_ELSE_TARGET(NV_IS_DEVICE,
|
1518
|
+
asm("{.reg .f16 low,high;\n"
|
1519
|
+
" mov.b32 {low,high}, %1;\n"
|
1520
|
+
" mov.b32 %0, {low,low};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)));
|
1521
|
+
,
|
1522
|
+
val.x = a.x;
|
1523
|
+
val.y = a.x;
|
1524
|
+
)
|
1525
|
+
return val;
|
1526
|
+
}
|
1527
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __high2half2(const __half2 a)
|
1528
|
+
{
|
1529
|
+
__half2 val;
|
1530
|
+
NV_IF_ELSE_TARGET(NV_IS_DEVICE,
|
1531
|
+
asm("{.reg .f16 low,high;\n"
|
1532
|
+
" mov.b32 {low,high}, %1;\n"
|
1533
|
+
" mov.b32 %0, {high,high};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)));
|
1534
|
+
,
|
1535
|
+
val.x = a.y;
|
1536
|
+
val.y = a.y;
|
1537
|
+
)
|
1538
|
+
return val;
|
1539
|
+
}
|
1540
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __high2half(const __half2 a)
|
1541
|
+
{
|
1542
|
+
__half ret;
|
1543
|
+
NV_IF_ELSE_TARGET(NV_IS_DEVICE,
|
1544
|
+
asm("{.reg .f16 low,high;\n"
|
1545
|
+
" mov.b32 {low,high}, %1;\n"
|
1546
|
+
" mov.b16 %0, high;}" : "=h"(__HALF_TO_US(ret)) : "r"(__HALF2_TO_CUI(a)));
|
1547
|
+
,
|
1548
|
+
ret = a.y;
|
1549
|
+
)
|
1550
|
+
return ret;
|
1551
|
+
}
|
1552
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __halves2half2(const __half a, const __half b)
|
1553
|
+
{
|
1554
|
+
__half2 val;
|
1555
|
+
NV_IF_ELSE_TARGET(NV_IS_DEVICE,
|
1556
|
+
asm("{ mov.b32 %0, {%1,%2};}\n"
|
1557
|
+
: "=r"(__HALF2_TO_UI(val)) : "h"(__HALF_TO_CUS(a)), "h"(__HALF_TO_CUS(b)));
|
1558
|
+
,
|
1559
|
+
val.x = a;
|
1560
|
+
val.y = b;
|
1561
|
+
)
|
1562
|
+
return val;
|
1563
|
+
}
|
1564
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __half2half2(const __half a)
|
1565
|
+
{
|
1566
|
+
__half2 val;
|
1567
|
+
NV_IF_ELSE_TARGET(NV_IS_DEVICE,
|
1568
|
+
asm("{ mov.b32 %0, {%1,%1};}\n"
|
1569
|
+
: "=r"(__HALF2_TO_UI(val)) : "h"(__HALF_TO_CUS(a)));
|
1570
|
+
,
|
1571
|
+
val.x = a;
|
1572
|
+
val.y = a;
|
1573
|
+
)
|
1574
|
+
return val;
|
1575
|
+
}
|
1576
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __lowhigh2highlow(const __half2 a)
|
1577
|
+
{
|
1578
|
+
__half2 val;
|
1579
|
+
NV_IF_ELSE_TARGET(NV_IS_DEVICE,
|
1580
|
+
asm("{.reg .f16 low,high;\n"
|
1581
|
+
" mov.b32 {low,high}, %1;\n"
|
1582
|
+
" mov.b32 %0, {high,low};}\n" : "=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)));
|
1583
|
+
,
|
1584
|
+
val.x = a.y;
|
1585
|
+
val.y = a.x;
|
1586
|
+
)
|
1587
|
+
return val;
|
1588
|
+
}
|
1589
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ short int __half_as_short(const __half h)
|
1590
|
+
{
|
1591
|
+
NV_IF_ELSE_TARGET(NV_IS_DEVICE,
|
1592
|
+
return static_cast<short int>(__HALF_TO_CUS(h));
|
1593
|
+
,
|
1594
|
+
return static_cast<short int>(__half_raw(h).x);
|
1595
|
+
)
|
1596
|
+
}
|
1597
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ unsigned short int __half_as_ushort(const __half h)
|
1598
|
+
{
|
1599
|
+
NV_IF_ELSE_TARGET(NV_IS_DEVICE,
|
1600
|
+
return __HALF_TO_CUS(h);
|
1601
|
+
,
|
1602
|
+
return __half_raw(h).x;
|
1603
|
+
)
|
1604
|
+
}
|
1605
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __short_as_half(const short int i)
|
1606
|
+
{
|
1607
|
+
NV_IF_ELSE_TARGET(NV_IS_DEVICE,
|
1608
|
+
__half h;
|
1609
|
+
__HALF_TO_US(h) = static_cast<unsigned short int>(i);
|
1610
|
+
return h;
|
1611
|
+
,
|
1612
|
+
__half_raw hr;
|
1613
|
+
hr.x = static_cast<unsigned short int>(i);
|
1614
|
+
return __half(hr);
|
1615
|
+
)
|
1616
|
+
}
|
1617
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __ushort_as_half(const unsigned short int i)
|
1618
|
+
{
|
1619
|
+
NV_IF_ELSE_TARGET(NV_IS_DEVICE,
|
1620
|
+
__half h;
|
1621
|
+
__HALF_TO_US(h) = i;
|
1622
|
+
return h;
|
1623
|
+
,
|
1624
|
+
__half_raw hr;
|
1625
|
+
hr.x = i;
|
1626
|
+
return __half(hr);)
|
1627
|
+
}
|
1628
|
+
|
1629
|
+
/******************************************************************************
|
1630
|
+
* __half arithmetic *
|
1631
|
+
******************************************************************************/
|
1632
|
+
#if defined(__CUDACC__) || defined(_NVHPC_CUDA)
|
1633
|
+
__CUDA_FP16_DECL__ __half __internal_device_hmax(const __half a, const __half b)
|
1634
|
+
{
|
1635
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80,
|
1636
|
+
__BINARY_OP_HALF_MACRO(max)
|
1637
|
+
,
|
1638
|
+
const float fa = __half2float(a);
|
1639
|
+
const float fb = __half2float(b);
|
1640
|
+
float fr;
|
1641
|
+
asm("{max.f32 %0,%1,%2;\n}"
|
1642
|
+
:"=f"(fr) : "f"(fa), "f"(fb));
|
1643
|
+
const __half hr = __float2half(fr);
|
1644
|
+
return hr;
|
1645
|
+
)
|
1646
|
+
}
|
1647
|
+
__CUDA_FP16_DECL__ __half __internal_device_hmin(const __half a, const __half b)
|
1648
|
+
{
|
1649
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80,
|
1650
|
+
__BINARY_OP_HALF_MACRO(min)
|
1651
|
+
,
|
1652
|
+
const float fa = __half2float(a);
|
1653
|
+
const float fb = __half2float(b);
|
1654
|
+
float fr;
|
1655
|
+
asm("{min.f32 %0,%1,%2;\n}"
|
1656
|
+
:"=f"(fr) : "f"(fa), "f"(fb));
|
1657
|
+
const __half hr = __float2half(fr);
|
1658
|
+
return hr;
|
1659
|
+
)
|
1660
|
+
}
|
1661
|
+
#endif /* defined(__CUDACC__) || defined(_NVHPC_CUDA) */
|
1662
|
+
+__CUDA_HOSTDEVICE_FP16_DECL__ __half __hmax(const __half a, const __half b)
+{
+    NV_IF_ELSE_TARGET(NV_IS_DEVICE,
+        return __internal_device_hmax(a, b);
+    ,
+        __half maxval;
+
+        maxval = (__hge(a, b) || __hisnan(b)) ? a : b;
+
+        if (__hisnan(maxval))
+        {
+            // if both inputs are NaN, return canonical NaN
+            maxval = CUDART_NAN_FP16;
+        }
+        else if (__heq(a, b))
+        {
+            // hmax(+0.0, -0.0) = +0.0
+            // unsigned compare 0x8000U > 0x0000U
+            __half_raw ra = __half_raw(a);
+            __half_raw rb = __half_raw(b);
+            maxval = (ra.x > rb.x) ? b : a;
+        }
+        return maxval;
+    )
+}
+__CUDA_HOSTDEVICE_FP16_DECL__ __half __hmin(const __half a, const __half b)
+{
+    NV_IF_ELSE_TARGET(NV_IS_DEVICE,
+        return __internal_device_hmin(a, b);
+    ,
+        __half minval;
+
+        minval = (__hle(a, b) || __hisnan(b)) ? a : b;
+
+        if (__hisnan(minval))
+        {
+            // if both inputs are NaN, return canonical NaN
+            minval = CUDART_NAN_FP16;
+        }
+        else if (__heq(a, b))
+        {
+            // hmin(+0.0, -0.0) = -0.0
+            // unsigned compare 0x8000U > 0x0000U
+            __half_raw ra = __half_raw(a);
+            __half_raw rb = __half_raw(b);
+            minval = (ra.x > rb.x) ? a : b;
+        }
+
+        return minval;
+    )
+}
+
+
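For context, the host fallback above makes the NaN and signed-zero handling explicit: a single NaN operand loses to the numeric operand, two NaN operands yield the canonical NaN, and `__hmax(+0.0, -0.0)` returns `+0.0` via the raw-bit comparison. A minimal, illustrative kernel (not part of the header; the kernel name is hypothetical, and a toolchain providing `<cuda_fp16.h>` is assumed):

```cuda
#include <cuda_fp16.h>
#include <cstdio>

// Illustrative sketch only: exercises the __hmax semantics described above.
__global__ void hmax_semantics_demo()
{
    const __half nan_h = __float2half(nanf(""));
    const __half one_h = __float2half(1.0f);
    const __half pzero = __float2half(0.0f);
    const __half nzero = __float2half(-0.0f);

    const __half r1 = __hmax(nan_h, one_h);  // numeric operand wins: 1.0
    const __half r2 = __hmax(nan_h, nan_h);  // both NaN: canonical NaN
    const __half r3 = __hmax(pzero, nzero);  // signed zeros: +0.0
    printf("%f %f %f\n", __half2float(r1), __half2float(r2), __half2float(r3));
}
```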
/******************************************************************************
|
1716
|
+
* __half2 arithmetic *
|
1717
|
+
******************************************************************************/
|
1718
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __hmax2(const __half2 a, const __half2 b)
|
1719
|
+
{
|
1720
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80,
|
1721
|
+
__BINARY_OP_HALF2_MACRO(max)
|
1722
|
+
,
|
1723
|
+
__half2 val;
|
1724
|
+
val.x = __hmax(a.x, b.x);
|
1725
|
+
val.y = __hmax(a.y, b.y);
|
1726
|
+
return val;
|
1727
|
+
)
|
1728
|
+
}
|
1729
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __hmin2(const __half2 a, const __half2 b)
|
1730
|
+
{
|
1731
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80,
|
1732
|
+
__BINARY_OP_HALF2_MACRO(min)
|
1733
|
+
,
|
1734
|
+
__half2 val;
|
1735
|
+
val.x = __hmin(a.x, b.x);
|
1736
|
+
val.y = __hmin(a.y, b.y);
|
1737
|
+
return val;
|
1738
|
+
)
|
1739
|
+
}
|
1740
|
+
|
1741
|
+
#if defined(__CUDACC__) || defined(_NVHPC_CUDA)
|
1742
|
+
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 300) || defined(_NVHPC_CUDA)
|
1743
|
+
/******************************************************************************
|
1744
|
+
* __half, __half2 warp shuffle *
|
1745
|
+
******************************************************************************/
|
1746
|
+
#define __SHUFFLE_HALF2_MACRO(name) /* do */ {\
|
1747
|
+
__half2 r; \
|
1748
|
+
asm volatile ("{" __CUDA_FP16_STRINGIFY(name) " %0,%1,%2,%3;\n}" \
|
1749
|
+
:"=r"(__HALF2_TO_UI(r)): "r"(__HALF2_TO_CUI(var)), "r"(delta), "r"(c)); \
|
1750
|
+
return r; \
|
1751
|
+
} /* while(0) */
|
1752
|
+
|
1753
|
+
#define __SHUFFLE_SYNC_HALF2_MACRO(name, var, delta, c, mask) /* do */ {\
|
1754
|
+
__half2 r; \
|
1755
|
+
asm volatile ("{" __CUDA_FP16_STRINGIFY(name) " %0,%1,%2,%3,%4;\n}" \
|
1756
|
+
:"=r"(__HALF2_TO_UI(r)): "r"(__HALF2_TO_CUI(var)), "r"(delta), "r"(c), "r"(mask)); \
|
1757
|
+
return r; \
|
1758
|
+
} /* while(0) */
|
1759
|
+
|
1760
|
+
#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ < 700)
|
1761
|
+
|
1762
|
+
__CUDA_FP16_DECL__ __half2 __shfl(const __half2 var, const int delta, const int width)
|
1763
|
+
{
|
1764
|
+
unsigned int warp_size;
|
1765
|
+
asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size));
|
1766
|
+
const unsigned int c = ((warp_size - static_cast<unsigned>(width)) << 8U) | 0x1fU;
|
1767
|
+
__SHUFFLE_HALF2_MACRO(shfl.idx.b32)
|
1768
|
+
}
|
1769
|
+
__CUDA_FP16_DECL__ __half2 __shfl_up(const __half2 var, const unsigned int delta, const int width)
|
1770
|
+
{
|
1771
|
+
unsigned int warp_size;
|
1772
|
+
asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size));
|
1773
|
+
const unsigned int c = (warp_size - static_cast<unsigned>(width)) << 8U;
|
1774
|
+
__SHUFFLE_HALF2_MACRO(shfl.up.b32)
|
1775
|
+
}
|
1776
|
+
__CUDA_FP16_DECL__ __half2 __shfl_down(const __half2 var, const unsigned int delta, const int width)
|
1777
|
+
{
|
1778
|
+
unsigned int warp_size;
|
1779
|
+
asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size));
|
1780
|
+
const unsigned int c = ((warp_size - static_cast<unsigned>(width)) << 8U) | 0x1fU;
|
1781
|
+
__SHUFFLE_HALF2_MACRO(shfl.down.b32)
|
1782
|
+
}
|
1783
|
+
__CUDA_FP16_DECL__ __half2 __shfl_xor(const __half2 var, const int delta, const int width)
|
1784
|
+
{
|
1785
|
+
unsigned int warp_size;
|
1786
|
+
asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size));
|
1787
|
+
const unsigned int c = ((warp_size - static_cast<unsigned>(width)) << 8U) | 0x1fU;
|
1788
|
+
__SHUFFLE_HALF2_MACRO(shfl.bfly.b32)
|
1789
|
+
}
|
1790
|
+
|
1791
|
+
#endif /* defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ < 700) */
|
1792
|
+
|
1793
|
+
__CUDA_FP16_DECL__ __half2 __shfl_sync(const unsigned int mask, const __half2 var, const int srcLane, const int width)
|
1794
|
+
{
|
1795
|
+
unsigned int warp_size;
|
1796
|
+
asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size));
|
1797
|
+
const unsigned int c = ((warp_size - static_cast<unsigned>(width)) << 8U) | 0x1fU;
|
1798
|
+
__SHUFFLE_SYNC_HALF2_MACRO(shfl.sync.idx.b32, var, srcLane, c, mask)
|
1799
|
+
}
|
1800
|
+
__CUDA_FP16_DECL__ __half2 __shfl_up_sync(const unsigned int mask, const __half2 var, const unsigned int delta, const int width)
|
1801
|
+
{
|
1802
|
+
unsigned int warp_size;
|
1803
|
+
asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size));
|
1804
|
+
const unsigned int c = (warp_size - static_cast<unsigned>(width)) << 8U;
|
1805
|
+
__SHUFFLE_SYNC_HALF2_MACRO(shfl.sync.up.b32, var, delta, c, mask)
|
1806
|
+
}
|
1807
|
+
__CUDA_FP16_DECL__ __half2 __shfl_down_sync(const unsigned int mask, const __half2 var, const unsigned int delta, const int width)
|
1808
|
+
{
|
1809
|
+
unsigned int warp_size;
|
1810
|
+
asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size));
|
1811
|
+
const unsigned int c = ((warp_size - static_cast<unsigned>(width)) << 8U) | 0x1fU;
|
1812
|
+
__SHUFFLE_SYNC_HALF2_MACRO(shfl.sync.down.b32, var, delta, c, mask)
|
1813
|
+
}
|
1814
|
+
__CUDA_FP16_DECL__ __half2 __shfl_xor_sync(const unsigned int mask, const __half2 var, const int laneMask, const int width)
|
1815
|
+
{
|
1816
|
+
unsigned int warp_size;
|
1817
|
+
asm("{mov.u32 %0, WARP_SZ;\n}" : "=r"(warp_size));
|
1818
|
+
const unsigned int c = ((warp_size - static_cast<unsigned>(width)) << 8U) | 0x1fU;
|
1819
|
+
__SHUFFLE_SYNC_HALF2_MACRO(shfl.sync.bfly.b32, var, laneMask, c, mask)
|
1820
|
+
}
|
1821
|
+
|
1822
|
+
#undef __SHUFFLE_HALF2_MACRO
|
1823
|
+
#undef __SHUFFLE_SYNC_HALF2_MACRO
|
1824
|
+
|
1825
|
+
#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ < 700)
|
1826
|
+
|
1827
|
+
__CUDA_FP16_DECL__ __half __shfl(const __half var, const int delta, const int width)
|
1828
|
+
{
|
1829
|
+
const __half2 temp1 = __halves2half2(var, var);
|
1830
|
+
const __half2 temp2 = __shfl(temp1, delta, width);
|
1831
|
+
return __low2half(temp2);
|
1832
|
+
}
|
1833
|
+
__CUDA_FP16_DECL__ __half __shfl_up(const __half var, const unsigned int delta, const int width)
|
1834
|
+
{
|
1835
|
+
const __half2 temp1 = __halves2half2(var, var);
|
1836
|
+
const __half2 temp2 = __shfl_up(temp1, delta, width);
|
1837
|
+
return __low2half(temp2);
|
1838
|
+
}
|
1839
|
+
__CUDA_FP16_DECL__ __half __shfl_down(const __half var, const unsigned int delta, const int width)
|
1840
|
+
{
|
1841
|
+
const __half2 temp1 = __halves2half2(var, var);
|
1842
|
+
const __half2 temp2 = __shfl_down(temp1, delta, width);
|
1843
|
+
return __low2half(temp2);
|
1844
|
+
}
|
1845
|
+
__CUDA_FP16_DECL__ __half __shfl_xor(const __half var, const int delta, const int width)
|
1846
|
+
{
|
1847
|
+
const __half2 temp1 = __halves2half2(var, var);
|
1848
|
+
const __half2 temp2 = __shfl_xor(temp1, delta, width);
|
1849
|
+
return __low2half(temp2);
|
1850
|
+
}
|
1851
|
+
|
1852
|
+
#endif /* defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ < 700) */
|
1853
|
+
|
1854
|
+
+__CUDA_FP16_DECL__ __half __shfl_sync(const unsigned int mask, const __half var, const int srcLane, const int width)
+{
+    const __half2 temp1 = __halves2half2(var, var);
+    const __half2 temp2 = __shfl_sync(mask, temp1, srcLane, width);
+    return __low2half(temp2);
+}
+__CUDA_FP16_DECL__ __half __shfl_up_sync(const unsigned int mask, const __half var, const unsigned int delta, const int width)
+{
+    const __half2 temp1 = __halves2half2(var, var);
+    const __half2 temp2 = __shfl_up_sync(mask, temp1, delta, width);
+    return __low2half(temp2);
+}
+__CUDA_FP16_DECL__ __half __shfl_down_sync(const unsigned int mask, const __half var, const unsigned int delta, const int width)
+{
+    const __half2 temp1 = __halves2half2(var, var);
+    const __half2 temp2 = __shfl_down_sync(mask, temp1, delta, width);
+    return __low2half(temp2);
+}
+__CUDA_FP16_DECL__ __half __shfl_xor_sync(const unsigned int mask, const __half var, const int laneMask, const int width)
+{
+    const __half2 temp1 = __halves2half2(var, var);
+    const __half2 temp2 = __shfl_xor_sync(mask, temp1, laneMask, width);
+    return __low2half(temp2);
+}
+
+#endif /* !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 300) || defined(_NVHPC_CUDA) */
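As an illustration of how these `__half` overloads are typically used (not part of the header), a warp-wide sum reduction built on `__shfl_down_sync`; a minimal sketch, assuming all 32 lanes of the warp are active:

```cuda
// Illustrative sketch: warp-level __half sum using the overloads declared above.
// Assumes a full warp (mask 0xFFFFFFFFU) and that <cuda_fp16.h> is included.
__device__ __half warp_reduce_sum_half(__half v)
{
    for (int offset = 16; offset > 0; offset >>= 1) {
        // Fold in the value held by the lane `offset` positions higher.
        v = __hadd(v, __shfl_down_sync(0xFFFFFFFFU, v, offset, 32));
    }
    return v;  // lane 0 ends up holding the sum over the warp
}
```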
/******************************************************************************
|
1881
|
+
* __half and __half2 __ldg,__ldcg,__ldca,__ldcs *
|
1882
|
+
******************************************************************************/
|
1883
|
+
|
1884
|
+
#if defined(__cplusplus) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 320) || defined(_NVHPC_CUDA))
|
1885
|
+
#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)
|
1886
|
+
#define __LDG_PTR "l"
|
1887
|
+
#else
|
1888
|
+
#define __LDG_PTR "r"
|
1889
|
+
#endif /*(defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)*/
|
1890
|
+
__CUDA_FP16_DECL__ __half2 __ldg(const __half2 *const ptr)
|
1891
|
+
{
|
1892
|
+
__half2 ret;
|
1893
|
+
asm ("ld.global.nc.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr));
|
1894
|
+
return ret;
|
1895
|
+
}
|
1896
|
+
__CUDA_FP16_DECL__ __half __ldg(const __half *const ptr)
|
1897
|
+
{
|
1898
|
+
__half ret;
|
1899
|
+
asm ("ld.global.nc.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr));
|
1900
|
+
return ret;
|
1901
|
+
}
|
1902
|
+
__CUDA_FP16_DECL__ __half2 __ldcg(const __half2 *const ptr)
|
1903
|
+
{
|
1904
|
+
__half2 ret;
|
1905
|
+
asm ("ld.global.cg.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr));
|
1906
|
+
return ret;
|
1907
|
+
}
|
1908
|
+
__CUDA_FP16_DECL__ __half __ldcg(const __half *const ptr)
|
1909
|
+
{
|
1910
|
+
__half ret;
|
1911
|
+
asm ("ld.global.cg.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr));
|
1912
|
+
return ret;
|
1913
|
+
}
|
1914
|
+
__CUDA_FP16_DECL__ __half2 __ldca(const __half2 *const ptr)
|
1915
|
+
{
|
1916
|
+
__half2 ret;
|
1917
|
+
asm ("ld.global.ca.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr));
|
1918
|
+
return ret;
|
1919
|
+
}
|
1920
|
+
__CUDA_FP16_DECL__ __half __ldca(const __half *const ptr)
|
1921
|
+
{
|
1922
|
+
__half ret;
|
1923
|
+
asm ("ld.global.ca.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr));
|
1924
|
+
return ret;
|
1925
|
+
}
|
1926
|
+
__CUDA_FP16_DECL__ __half2 __ldcs(const __half2 *const ptr)
|
1927
|
+
{
|
1928
|
+
__half2 ret;
|
1929
|
+
asm ("ld.global.cs.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr));
|
1930
|
+
return ret;
|
1931
|
+
}
|
1932
|
+
__CUDA_FP16_DECL__ __half __ldcs(const __half *const ptr)
|
1933
|
+
{
|
1934
|
+
__half ret;
|
1935
|
+
asm ("ld.global.cs.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr));
|
1936
|
+
return ret;
|
1937
|
+
}
|
1938
|
+
__CUDA_FP16_DECL__ __half2 __ldlu(const __half2 *const ptr)
|
1939
|
+
{
|
1940
|
+
__half2 ret;
|
1941
|
+
asm ("ld.global.lu.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr) : "memory");
|
1942
|
+
return ret;
|
1943
|
+
}
|
1944
|
+
__CUDA_FP16_DECL__ __half __ldlu(const __half *const ptr)
|
1945
|
+
{
|
1946
|
+
__half ret;
|
1947
|
+
asm ("ld.global.lu.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr) : "memory");
|
1948
|
+
return ret;
|
1949
|
+
}
|
1950
|
+
__CUDA_FP16_DECL__ __half2 __ldcv(const __half2 *const ptr)
|
1951
|
+
{
|
1952
|
+
__half2 ret;
|
1953
|
+
asm ("ld.global.cv.b32 %0, [%1];" : "=r"(__HALF2_TO_UI(ret)) : __LDG_PTR(ptr) : "memory");
|
1954
|
+
return ret;
|
1955
|
+
}
|
1956
|
+
__CUDA_FP16_DECL__ __half __ldcv(const __half *const ptr)
|
1957
|
+
{
|
1958
|
+
__half ret;
|
1959
|
+
asm ("ld.global.cv.b16 %0, [%1];" : "=h"(__HALF_TO_US(ret)) : __LDG_PTR(ptr) : "memory");
|
1960
|
+
return ret;
|
1961
|
+
}
|
1962
|
+
__CUDA_FP16_DECL__ void __stwb(__half2 *const ptr, const __half2 value)
|
1963
|
+
{
|
1964
|
+
asm ("st.global.wb.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__HALF2_TO_CUI(value)) : "memory");
|
1965
|
+
}
|
1966
|
+
__CUDA_FP16_DECL__ void __stwb(__half *const ptr, const __half value)
|
1967
|
+
{
|
1968
|
+
asm ("st.global.wb.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__HALF_TO_CUS(value)) : "memory");
|
1969
|
+
}
|
1970
|
+
__CUDA_FP16_DECL__ void __stcg(__half2 *const ptr, const __half2 value)
|
1971
|
+
{
|
1972
|
+
asm ("st.global.cg.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__HALF2_TO_CUI(value)) : "memory");
|
1973
|
+
}
|
1974
|
+
__CUDA_FP16_DECL__ void __stcg(__half *const ptr, const __half value)
|
1975
|
+
{
|
1976
|
+
asm ("st.global.cg.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__HALF_TO_CUS(value)) : "memory");
|
1977
|
+
}
|
1978
|
+
__CUDA_FP16_DECL__ void __stcs(__half2 *const ptr, const __half2 value)
|
1979
|
+
{
|
1980
|
+
asm ("st.global.cs.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__HALF2_TO_CUI(value)) : "memory");
|
1981
|
+
}
|
1982
|
+
__CUDA_FP16_DECL__ void __stcs(__half *const ptr, const __half value)
|
1983
|
+
{
|
1984
|
+
asm ("st.global.cs.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__HALF_TO_CUS(value)) : "memory");
|
1985
|
+
}
|
1986
|
+
__CUDA_FP16_DECL__ void __stwt(__half2 *const ptr, const __half2 value)
|
1987
|
+
{
|
1988
|
+
asm ("st.global.wt.b32 [%0], %1;" :: __LDG_PTR(ptr), "r"(__HALF2_TO_CUI(value)) : "memory");
|
1989
|
+
}
|
1990
|
+
__CUDA_FP16_DECL__ void __stwt(__half *const ptr, const __half value)
|
1991
|
+
{
|
1992
|
+
asm ("st.global.wt.b16 [%0], %1;" :: __LDG_PTR(ptr), "h"(__HALF_TO_CUS(value)) : "memory");
|
1993
|
+
}
|
1994
|
+
#undef __LDG_PTR
|
1995
|
+
#endif /* defined(__cplusplus) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 320) || defined(_NVHPC_CUDA)) */
|
1996
|
+
#endif /* defined(__CUDACC__) || defined(_NVHPC_CUDA) */
|
1997
|
+
|
1998
|
+
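The cache-hint loads and stores above are typically paired in streaming kernels that touch each element only once. A minimal sketch (illustrative kernel name, not from the header) using the `.cs` "evict-first" variants for `__half2` data:

```cuda
// Illustrative sketch: stream n __half2 elements through the .cs cache path.
// Requires a device of compute capability 3.2+ and <cuda_fp16.h>.
__global__ void stream_copy_half2(const __half2 *__restrict__ src,
                                  __half2 *__restrict__ dst, int n)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        const __half2 v = __ldcs(src + i);  // ld.global.cs.b32
        __stcs(dst + i, v);                 // st.global.cs.b32
    }
}
```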
/******************************************************************************
|
1999
|
+
* __half2 comparison *
|
2000
|
+
******************************************************************************/
|
2001
|
+
#define __COMPARISON_OP_HALF2_MACRO(name) /* do */ {\
|
2002
|
+
__half2 val; \
|
2003
|
+
asm( "{ " __CUDA_FP16_STRINGIFY(name) ".f16x2.f16x2 %0,%1,%2;\n}" \
|
2004
|
+
:"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)),"r"(__HALF2_TO_CUI(b))); \
|
2005
|
+
return val; \
|
2006
|
+
} /* while(0) */
|
2007
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __heq2(const __half2 a, const __half2 b)
|
2008
|
+
{
|
2009
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2010
|
+
__COMPARISON_OP_HALF2_MACRO(set.eq)
|
2011
|
+
,
|
2012
|
+
__half2_raw val;
|
2013
|
+
val.x = __heq(a.x, b.x) ? (unsigned short)0x3C00U : (unsigned short)0U;
|
2014
|
+
val.y = __heq(a.y, b.y) ? (unsigned short)0x3C00U : (unsigned short)0U;
|
2015
|
+
return __half2(val);
|
2016
|
+
)
|
2017
|
+
}
|
2018
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __hne2(const __half2 a, const __half2 b)
|
2019
|
+
{
|
2020
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2021
|
+
__COMPARISON_OP_HALF2_MACRO(set.ne)
|
2022
|
+
,
|
2023
|
+
__half2_raw val;
|
2024
|
+
val.x = __hne(a.x, b.x) ? (unsigned short)0x3C00U : (unsigned short)0U;
|
2025
|
+
val.y = __hne(a.y, b.y) ? (unsigned short)0x3C00U : (unsigned short)0U;
|
2026
|
+
return __half2(val);
|
2027
|
+
)
|
2028
|
+
}
|
2029
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __hle2(const __half2 a, const __half2 b)
|
2030
|
+
{
|
2031
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2032
|
+
__COMPARISON_OP_HALF2_MACRO(set.le)
|
2033
|
+
,
|
2034
|
+
__half2_raw val;
|
2035
|
+
val.x = __hle(a.x, b.x) ? (unsigned short)0x3C00U : (unsigned short)0U;
|
2036
|
+
val.y = __hle(a.y, b.y) ? (unsigned short)0x3C00U : (unsigned short)0U;
|
2037
|
+
return __half2(val);
|
2038
|
+
)
|
2039
|
+
}
|
2040
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __hge2(const __half2 a, const __half2 b)
|
2041
|
+
{
|
2042
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2043
|
+
__COMPARISON_OP_HALF2_MACRO(set.ge)
|
2044
|
+
,
|
2045
|
+
__half2_raw val;
|
2046
|
+
val.x = __hge(a.x, b.x) ? (unsigned short)0x3C00U : (unsigned short)0U;
|
2047
|
+
val.y = __hge(a.y, b.y) ? (unsigned short)0x3C00U : (unsigned short)0U;
|
2048
|
+
return __half2(val);
|
2049
|
+
)
|
2050
|
+
}
|
2051
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __hlt2(const __half2 a, const __half2 b)
|
2052
|
+
{
|
2053
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2054
|
+
__COMPARISON_OP_HALF2_MACRO(set.lt)
|
2055
|
+
,
|
2056
|
+
__half2_raw val;
|
2057
|
+
val.x = __hlt(a.x, b.x) ? (unsigned short)0x3C00U : (unsigned short)0U;
|
2058
|
+
val.y = __hlt(a.y, b.y) ? (unsigned short)0x3C00U : (unsigned short)0U;
|
2059
|
+
return __half2(val);
|
2060
|
+
)
|
2061
|
+
}
|
2062
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __hgt2(const __half2 a, const __half2 b)
|
2063
|
+
{
|
2064
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2065
|
+
__COMPARISON_OP_HALF2_MACRO(set.gt)
|
2066
|
+
,
|
2067
|
+
__half2_raw val;
|
2068
|
+
val.x = __hgt(a.x, b.x) ? (unsigned short)0x3C00U : (unsigned short)0U;
|
2069
|
+
val.y = __hgt(a.y, b.y) ? (unsigned short)0x3C00U : (unsigned short)0U;
|
2070
|
+
return __half2(val);
|
2071
|
+
)
|
2072
|
+
}
|
2073
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __hequ2(const __half2 a, const __half2 b)
|
2074
|
+
{
|
2075
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2076
|
+
__COMPARISON_OP_HALF2_MACRO(set.equ)
|
2077
|
+
,
|
2078
|
+
__half2_raw val;
|
2079
|
+
val.x = __hequ(a.x, b.x) ? (unsigned short)0x3C00U : (unsigned short)0U;
|
2080
|
+
val.y = __hequ(a.y, b.y) ? (unsigned short)0x3C00U : (unsigned short)0U;
|
2081
|
+
return __half2(val);
|
2082
|
+
)
|
2083
|
+
}
|
2084
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __hneu2(const __half2 a, const __half2 b)
|
2085
|
+
{
|
2086
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2087
|
+
__COMPARISON_OP_HALF2_MACRO(set.neu)
|
2088
|
+
,
|
2089
|
+
__half2_raw val;
|
2090
|
+
val.x = __hneu(a.x, b.x) ? (unsigned short)0x3C00U : (unsigned short)0U;
|
2091
|
+
val.y = __hneu(a.y, b.y) ? (unsigned short)0x3C00U : (unsigned short)0U;
|
2092
|
+
return __half2(val);
|
2093
|
+
)
|
2094
|
+
}
|
2095
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __hleu2(const __half2 a, const __half2 b)
|
2096
|
+
{
|
2097
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2098
|
+
__COMPARISON_OP_HALF2_MACRO(set.leu)
|
2099
|
+
,
|
2100
|
+
__half2_raw val;
|
2101
|
+
val.x = __hleu(a.x, b.x) ? (unsigned short)0x3C00U : (unsigned short)0U;
|
2102
|
+
val.y = __hleu(a.y, b.y) ? (unsigned short)0x3C00U : (unsigned short)0U;
|
2103
|
+
return __half2(val);
|
2104
|
+
)
|
2105
|
+
}
|
2106
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __hgeu2(const __half2 a, const __half2 b)
|
2107
|
+
{
|
2108
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2109
|
+
__COMPARISON_OP_HALF2_MACRO(set.geu)
|
2110
|
+
,
|
2111
|
+
__half2_raw val;
|
2112
|
+
val.x = __hgeu(a.x, b.x) ? (unsigned short)0x3C00U : (unsigned short)0U;
|
2113
|
+
val.y = __hgeu(a.y, b.y) ? (unsigned short)0x3C00U : (unsigned short)0U;
|
2114
|
+
return __half2(val);
|
2115
|
+
)
|
2116
|
+
}
|
2117
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __hltu2(const __half2 a, const __half2 b)
|
2118
|
+
{
|
2119
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2120
|
+
__COMPARISON_OP_HALF2_MACRO(set.ltu)
|
2121
|
+
,
|
2122
|
+
__half2_raw val;
|
2123
|
+
val.x = __hltu(a.x, b.x) ? (unsigned short)0x3C00U : (unsigned short)0U;
|
2124
|
+
val.y = __hltu(a.y, b.y) ? (unsigned short)0x3C00U : (unsigned short)0U;
|
2125
|
+
return __half2(val);
|
2126
|
+
)
|
2127
|
+
}
|
2128
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __hgtu2(const __half2 a, const __half2 b)
|
2129
|
+
{
|
2130
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2131
|
+
__COMPARISON_OP_HALF2_MACRO(set.gtu)
|
2132
|
+
,
|
2133
|
+
__half2_raw val;
|
2134
|
+
val.x = __hgtu(a.x, b.x) ? (unsigned short)0x3C00U : (unsigned short)0U;
|
2135
|
+
val.y = __hgtu(a.y, b.y) ? (unsigned short)0x3C00U : (unsigned short)0U;
|
2136
|
+
return __half2(val);
|
2137
|
+
)
|
2138
|
+
}
|
2139
|
+
#undef __COMPARISON_OP_HALF2_MACRO
|
2140
|
+
/******************************************************************************
|
2141
|
+
* __half2 comparison with mask output *
|
2142
|
+
******************************************************************************/
|
2143
|
+
#define __COMPARISON_OP_HALF2_MACRO_MASK(name) /* do */ {\
|
2144
|
+
unsigned val; \
|
2145
|
+
asm( "{ " __CUDA_FP16_STRINGIFY(name) ".u32.f16x2 %0,%1,%2;\n}" \
|
2146
|
+
:"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)),"r"(__HALF2_TO_CUI(b))); \
|
2147
|
+
return val; \
|
2148
|
+
} /* while(0) */
|
2149
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ unsigned int __heq2_mask(const __half2 a, const __half2 b)
|
2150
|
+
{
|
2151
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2152
|
+
__COMPARISON_OP_HALF2_MACRO_MASK(set.eq)
|
2153
|
+
,
|
2154
|
+
const unsigned short px = __heq(a.x, b.x) ? (unsigned short)0xFFFFU : (unsigned short)0U;
|
2155
|
+
const unsigned short py = __heq(a.y, b.y) ? (unsigned short)0xFFFFU : (unsigned short)0U;
|
2156
|
+
unsigned ur = (unsigned)py;
|
2157
|
+
ur <<= (unsigned)16U;
|
2158
|
+
ur |= (unsigned)px;
|
2159
|
+
return ur;
|
2160
|
+
)
|
2161
|
+
}
|
2162
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ unsigned int __hne2_mask(const __half2 a, const __half2 b)
|
2163
|
+
{
|
2164
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2165
|
+
__COMPARISON_OP_HALF2_MACRO_MASK(set.ne)
|
2166
|
+
,
|
2167
|
+
const unsigned short px = __hne(a.x, b.x) ? (unsigned short)0xFFFFU : (unsigned short)0U;
|
2168
|
+
const unsigned short py = __hne(a.y, b.y) ? (unsigned short)0xFFFFU : (unsigned short)0U;
|
2169
|
+
unsigned ur = (unsigned)py;
|
2170
|
+
ur <<= (unsigned)16U;
|
2171
|
+
ur |= (unsigned)px;
|
2172
|
+
return ur;
|
2173
|
+
)
|
2174
|
+
}
|
2175
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ unsigned int __hle2_mask(const __half2 a, const __half2 b)
|
2176
|
+
{
|
2177
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2178
|
+
__COMPARISON_OP_HALF2_MACRO_MASK(set.le)
|
2179
|
+
,
|
2180
|
+
const unsigned short px = __hle(a.x, b.x) ? (unsigned short)0xFFFFU : (unsigned short)0U;
|
2181
|
+
const unsigned short py = __hle(a.y, b.y) ? (unsigned short)0xFFFFU : (unsigned short)0U;
|
2182
|
+
unsigned ur = (unsigned)py;
|
2183
|
+
ur <<= (unsigned)16U;
|
2184
|
+
ur |= (unsigned)px;
|
2185
|
+
return ur;
|
2186
|
+
)
|
2187
|
+
}
|
2188
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ unsigned int __hge2_mask(const __half2 a, const __half2 b)
|
2189
|
+
{
|
2190
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2191
|
+
__COMPARISON_OP_HALF2_MACRO_MASK(set.ge)
|
2192
|
+
,
|
2193
|
+
const unsigned short px = __hge(a.x, b.x) ? (unsigned short)0xFFFFU : (unsigned short)0U;
|
2194
|
+
const unsigned short py = __hge(a.y, b.y) ? (unsigned short)0xFFFFU : (unsigned short)0U;
|
2195
|
+
unsigned ur = (unsigned)py;
|
2196
|
+
ur <<= (unsigned)16U;
|
2197
|
+
ur |= (unsigned)px;
|
2198
|
+
return ur;
|
2199
|
+
)
|
2200
|
+
}
|
2201
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ unsigned int __hlt2_mask(const __half2 a, const __half2 b)
|
2202
|
+
{
|
2203
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2204
|
+
__COMPARISON_OP_HALF2_MACRO_MASK(set.lt)
|
2205
|
+
,
|
2206
|
+
const unsigned short px = __hlt(a.x, b.x) ? (unsigned short)0xFFFFU : (unsigned short)0U;
|
2207
|
+
const unsigned short py = __hlt(a.y, b.y) ? (unsigned short)0xFFFFU : (unsigned short)0U;
|
2208
|
+
unsigned ur = (unsigned)py;
|
2209
|
+
ur <<= (unsigned)16U;
|
2210
|
+
ur |= (unsigned)px;
|
2211
|
+
return ur;
|
2212
|
+
)
|
2213
|
+
}
|
2214
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ unsigned int __hgt2_mask(const __half2 a, const __half2 b)
|
2215
|
+
{
|
2216
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2217
|
+
__COMPARISON_OP_HALF2_MACRO_MASK(set.gt)
|
2218
|
+
,
|
2219
|
+
const unsigned short px = __hgt(a.x, b.x) ? (unsigned short)0xFFFFU : (unsigned short)0U;
|
2220
|
+
const unsigned short py = __hgt(a.y, b.y) ? (unsigned short)0xFFFFU : (unsigned short)0U;
|
2221
|
+
unsigned ur = (unsigned)py;
|
2222
|
+
ur <<= (unsigned)16U;
|
2223
|
+
ur |= (unsigned)px;
|
2224
|
+
return ur;
|
2225
|
+
)
|
2226
|
+
}
|
2227
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ unsigned int __hequ2_mask(const __half2 a, const __half2 b)
|
2228
|
+
{
|
2229
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2230
|
+
__COMPARISON_OP_HALF2_MACRO_MASK(set.equ)
|
2231
|
+
,
|
2232
|
+
const unsigned short px = __hequ(a.x, b.x) ? (unsigned short)0xFFFFU : (unsigned short)0U;
|
2233
|
+
const unsigned short py = __hequ(a.y, b.y) ? (unsigned short)0xFFFFU : (unsigned short)0U;
|
2234
|
+
unsigned ur = (unsigned)py;
|
2235
|
+
ur <<= (unsigned)16U;
|
2236
|
+
ur |= (unsigned)px;
|
2237
|
+
return ur;
|
2238
|
+
)
|
2239
|
+
}
|
2240
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ unsigned int __hneu2_mask(const __half2 a, const __half2 b)
|
2241
|
+
{
|
2242
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2243
|
+
__COMPARISON_OP_HALF2_MACRO_MASK(set.neu)
|
2244
|
+
,
|
2245
|
+
const unsigned short px = __hneu(a.x, b.x) ? (unsigned short)0xFFFFU : (unsigned short)0U;
|
2246
|
+
const unsigned short py = __hneu(a.y, b.y) ? (unsigned short)0xFFFFU : (unsigned short)0U;
|
2247
|
+
unsigned ur = (unsigned)py;
|
2248
|
+
ur <<= (unsigned)16U;
|
2249
|
+
ur |= (unsigned)px;
|
2250
|
+
return ur;
|
2251
|
+
)
|
2252
|
+
}
|
2253
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ unsigned int __hleu2_mask(const __half2 a, const __half2 b)
|
2254
|
+
{
|
2255
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2256
|
+
__COMPARISON_OP_HALF2_MACRO_MASK(set.leu)
|
2257
|
+
,
|
2258
|
+
const unsigned short px = __hleu(a.x, b.x) ? (unsigned short)0xFFFFU : (unsigned short)0U;
|
2259
|
+
const unsigned short py = __hleu(a.y, b.y) ? (unsigned short)0xFFFFU : (unsigned short)0U;
|
2260
|
+
unsigned ur = (unsigned)py;
|
2261
|
+
ur <<= (unsigned)16U;
|
2262
|
+
ur |= (unsigned)px;
|
2263
|
+
return ur;
|
2264
|
+
)
|
2265
|
+
}
|
2266
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ unsigned int __hgeu2_mask(const __half2 a, const __half2 b)
|
2267
|
+
{
|
2268
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2269
|
+
__COMPARISON_OP_HALF2_MACRO_MASK(set.geu)
|
2270
|
+
,
|
2271
|
+
const unsigned short px = __hgeu(a.x, b.x) ? (unsigned short)0xFFFFU : (unsigned short)0U;
|
2272
|
+
const unsigned short py = __hgeu(a.y, b.y) ? (unsigned short)0xFFFFU : (unsigned short)0U;
|
2273
|
+
unsigned ur = (unsigned)py;
|
2274
|
+
ur <<= (unsigned)16U;
|
2275
|
+
ur |= (unsigned)px;
|
2276
|
+
return ur;
|
2277
|
+
)
|
2278
|
+
}
|
2279
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ unsigned int __hltu2_mask(const __half2 a, const __half2 b)
|
2280
|
+
{
|
2281
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2282
|
+
__COMPARISON_OP_HALF2_MACRO_MASK(set.ltu)
|
2283
|
+
,
|
2284
|
+
const unsigned short px = __hltu(a.x, b.x) ? (unsigned short)0xFFFFU : (unsigned short)0U;
|
2285
|
+
const unsigned short py = __hltu(a.y, b.y) ? (unsigned short)0xFFFFU : (unsigned short)0U;
|
2286
|
+
unsigned ur = (unsigned)py;
|
2287
|
+
ur <<= (unsigned)16U;
|
2288
|
+
ur |= (unsigned)px;
|
2289
|
+
return ur;
|
2290
|
+
)
|
2291
|
+
}
|
2292
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ unsigned int __hgtu2_mask(const __half2 a, const __half2 b)
|
2293
|
+
{
|
2294
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2295
|
+
__COMPARISON_OP_HALF2_MACRO_MASK(set.gtu)
|
2296
|
+
,
|
2297
|
+
const unsigned short px = __hgtu(a.x, b.x) ? (unsigned short)0xFFFFU : (unsigned short)0U;
|
2298
|
+
const unsigned short py = __hgtu(a.y, b.y) ? (unsigned short)0xFFFFU : (unsigned short)0U;
|
2299
|
+
unsigned ur = (unsigned)py;
|
2300
|
+
ur <<= (unsigned)16U;
|
2301
|
+
ur |= (unsigned)px;
|
2302
|
+
return ur;
|
2303
|
+
)
|
2304
|
+
}
|
2305
|
+
#undef __COMPARISON_OP_HALF2_MACRO_MASK
|
2306
|
+
|
2307
|
+
+__CUDA_HOSTDEVICE_FP16_DECL__ bool __hbeq2(const __half2 a, const __half2 b)
+{
+    const unsigned mask = __heq2_mask(a, b);
+    return (mask == 0xFFFFFFFFU);
+}
+__CUDA_HOSTDEVICE_FP16_DECL__ bool __hbne2(const __half2 a, const __half2 b)
+{
+    const unsigned mask = __hne2_mask(a, b);
+    return (mask == 0xFFFFFFFFU);
+}
+__CUDA_HOSTDEVICE_FP16_DECL__ bool __hble2(const __half2 a, const __half2 b)
+{
+    const unsigned mask = __hle2_mask(a, b);
+    return (mask == 0xFFFFFFFFU);
+}
+__CUDA_HOSTDEVICE_FP16_DECL__ bool __hbge2(const __half2 a, const __half2 b)
+{
+    const unsigned mask = __hge2_mask(a, b);
+    return (mask == 0xFFFFFFFFU);
+}
+__CUDA_HOSTDEVICE_FP16_DECL__ bool __hblt2(const __half2 a, const __half2 b)
+{
+    const unsigned mask = __hlt2_mask(a, b);
+    return (mask == 0xFFFFFFFFU);
+}
+__CUDA_HOSTDEVICE_FP16_DECL__ bool __hbgt2(const __half2 a, const __half2 b)
+{
+    const unsigned mask = __hgt2_mask(a, b);
+    return (mask == 0xFFFFFFFFU);
+}
+__CUDA_HOSTDEVICE_FP16_DECL__ bool __hbequ2(const __half2 a, const __half2 b)
+{
+    const unsigned mask = __hequ2_mask(a, b);
+    return (mask == 0xFFFFFFFFU);
+}
+__CUDA_HOSTDEVICE_FP16_DECL__ bool __hbneu2(const __half2 a, const __half2 b)
+{
+    const unsigned mask = __hneu2_mask(a, b);
+    return (mask == 0xFFFFFFFFU);
+}
+__CUDA_HOSTDEVICE_FP16_DECL__ bool __hbleu2(const __half2 a, const __half2 b)
+{
+    const unsigned mask = __hleu2_mask(a, b);
+    return (mask == 0xFFFFFFFFU);
+}
+__CUDA_HOSTDEVICE_FP16_DECL__ bool __hbgeu2(const __half2 a, const __half2 b)
+{
+    const unsigned mask = __hgeu2_mask(a, b);
+    return (mask == 0xFFFFFFFFU);
+}
+__CUDA_HOSTDEVICE_FP16_DECL__ bool __hbltu2(const __half2 a, const __half2 b)
+{
+    const unsigned mask = __hltu2_mask(a, b);
+    return (mask == 0xFFFFFFFFU);
+}
+__CUDA_HOSTDEVICE_FP16_DECL__ bool __hbgtu2(const __half2 a, const __half2 b)
+{
+    const unsigned mask = __hgtu2_mask(a, b);
+    return (mask == 0xFFFFFFFFU);
+}
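For reference, the `_mask` variants above return a 32-bit word with `0xFFFF` in each 16-bit lane that satisfies the comparison, and the boolean `__hb*2` wrappers simply test that both lanes are set. A small illustrative device snippet (not part of the header; the function name is hypothetical):

```cuda
// Illustrative sketch: per-lane mask versus all-lanes boolean for equality.
__device__ void compare_pair_demo(const __half2 a, const __half2 b,
                                  unsigned int *mask_out, bool *all_out)
{
    const unsigned int m = __heq2_mask(a, b);  // 0xFFFF per equal lane, else 0
    *mask_out = m;
    *all_out = __hbeq2(a, b);                  // same as (m == 0xFFFFFFFFU)
}
```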
/******************************************************************************
|
2368
|
+
* __half comparison *
|
2369
|
+
******************************************************************************/
|
2370
|
+
#define __COMPARISON_OP_HALF_MACRO(name) /* do */ {\
|
2371
|
+
unsigned short val; \
|
2372
|
+
asm( "{ .reg .pred __$temp3;\n" \
|
2373
|
+
" setp." __CUDA_FP16_STRINGIFY(name) ".f16 __$temp3, %1, %2;\n" \
|
2374
|
+
" selp.u16 %0, 1, 0, __$temp3;}" \
|
2375
|
+
: "=h"(val) : "h"(__HALF_TO_CUS(a)), "h"(__HALF_TO_CUS(b))); \
|
2376
|
+
return (val != 0U) ? true : false; \
|
2377
|
+
} /* while(0) */
|
2378
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ bool __heq(const __half a, const __half b)
|
2379
|
+
{
|
2380
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2381
|
+
__COMPARISON_OP_HALF_MACRO(eq)
|
2382
|
+
,
|
2383
|
+
const float fa = __half2float(a);
|
2384
|
+
const float fb = __half2float(b);
|
2385
|
+
return (fa == fb);
|
2386
|
+
)
|
2387
|
+
}
|
2388
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ bool __hne(const __half a, const __half b)
|
2389
|
+
{
|
2390
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2391
|
+
__COMPARISON_OP_HALF_MACRO(ne)
|
2392
|
+
,
|
2393
|
+
const float fa = __half2float(a);
|
2394
|
+
const float fb = __half2float(b);
|
2395
|
+
return (fa != fb) && (!__hisnan(a)) && (!__hisnan(b));
|
2396
|
+
)
|
2397
|
+
}
|
2398
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ bool __hle(const __half a, const __half b)
|
2399
|
+
{
|
2400
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2401
|
+
__COMPARISON_OP_HALF_MACRO(le)
|
2402
|
+
,
|
2403
|
+
const float fa = __half2float(a);
|
2404
|
+
const float fb = __half2float(b);
|
2405
|
+
return (fa <= fb);
|
2406
|
+
)
|
2407
|
+
}
|
2408
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ bool __hge(const __half a, const __half b)
|
2409
|
+
{
|
2410
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2411
|
+
__COMPARISON_OP_HALF_MACRO(ge)
|
2412
|
+
,
|
2413
|
+
const float fa = __half2float(a);
|
2414
|
+
const float fb = __half2float(b);
|
2415
|
+
return (fa >= fb);
|
2416
|
+
)
|
2417
|
+
}
|
2418
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ bool __hlt(const __half a, const __half b)
|
2419
|
+
{
|
2420
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2421
|
+
__COMPARISON_OP_HALF_MACRO(lt)
|
2422
|
+
,
|
2423
|
+
const float fa = __half2float(a);
|
2424
|
+
const float fb = __half2float(b);
|
2425
|
+
return (fa < fb);
|
2426
|
+
)
|
2427
|
+
}
|
2428
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ bool __hgt(const __half a, const __half b)
|
2429
|
+
{
|
2430
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2431
|
+
__COMPARISON_OP_HALF_MACRO(gt)
|
2432
|
+
,
|
2433
|
+
const float fa = __half2float(a);
|
2434
|
+
const float fb = __half2float(b);
|
2435
|
+
return (fa > fb);
|
2436
|
+
)
|
2437
|
+
}
|
2438
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ bool __hequ(const __half a, const __half b)
|
2439
|
+
{
|
2440
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2441
|
+
__COMPARISON_OP_HALF_MACRO(equ)
|
2442
|
+
,
|
2443
|
+
const float fa = __half2float(a);
|
2444
|
+
const float fb = __half2float(b);
|
2445
|
+
return (fa == fb) || (__hisnan(a)) || (__hisnan(b));
|
2446
|
+
)
|
2447
|
+
}
|
2448
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ bool __hneu(const __half a, const __half b)
|
2449
|
+
{
|
2450
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2451
|
+
__COMPARISON_OP_HALF_MACRO(neu)
|
2452
|
+
,
|
2453
|
+
const float fa = __half2float(a);
|
2454
|
+
const float fb = __half2float(b);
|
2455
|
+
return (fa != fb);
|
2456
|
+
)
|
2457
|
+
}
|
2458
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ bool __hleu(const __half a, const __half b)
|
2459
|
+
{
|
2460
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2461
|
+
__COMPARISON_OP_HALF_MACRO(leu)
|
2462
|
+
,
|
2463
|
+
const float fa = __half2float(a);
|
2464
|
+
const float fb = __half2float(b);
|
2465
|
+
return (fa <= fb) || (__hisnan(a)) || (__hisnan(b));
|
2466
|
+
)
|
2467
|
+
}
|
2468
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ bool __hgeu(const __half a, const __half b)
|
2469
|
+
{
|
2470
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2471
|
+
__COMPARISON_OP_HALF_MACRO(geu)
|
2472
|
+
,
|
2473
|
+
const float fa = __half2float(a);
|
2474
|
+
const float fb = __half2float(b);
|
2475
|
+
return (fa >= fb) || (__hisnan(a)) || (__hisnan(b));
|
2476
|
+
)
|
2477
|
+
}
|
2478
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ bool __hltu(const __half a, const __half b)
|
2479
|
+
{
|
2480
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2481
|
+
__COMPARISON_OP_HALF_MACRO(ltu)
|
2482
|
+
,
|
2483
|
+
const float fa = __half2float(a);
|
2484
|
+
const float fb = __half2float(b);
|
2485
|
+
return (fa < fb) || (__hisnan(a)) || (__hisnan(b));
|
2486
|
+
)
|
2487
|
+
}
|
2488
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ bool __hgtu(const __half a, const __half b)
|
2489
|
+
{
|
2490
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2491
|
+
__COMPARISON_OP_HALF_MACRO(gtu)
|
2492
|
+
,
|
2493
|
+
const float fa = __half2float(a);
|
2494
|
+
const float fb = __half2float(b);
|
2495
|
+
return (fa > fb) || (__hisnan(a)) || (__hisnan(b));
|
2496
|
+
)
|
2497
|
+
}
|
2498
|
+
#undef __COMPARISON_OP_HALF_MACRO
|
2499
|
+
/******************************************************************************
|
2500
|
+
* __half2 arithmetic *
|
2501
|
+
******************************************************************************/
|
2502
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __hadd2(const __half2 a, const __half2 b)
|
2503
|
+
{
|
2504
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2505
|
+
__BINARY_OP_HALF2_MACRO(add)
|
2506
|
+
,
|
2507
|
+
__half2 val;
|
2508
|
+
val.x = __hadd(a.x, b.x);
|
2509
|
+
val.y = __hadd(a.y, b.y);
|
2510
|
+
return val;
|
2511
|
+
)
|
2512
|
+
}
|
2513
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __hsub2(const __half2 a, const __half2 b)
|
2514
|
+
{
|
2515
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2516
|
+
__BINARY_OP_HALF2_MACRO(sub)
|
2517
|
+
,
|
2518
|
+
__half2 val;
|
2519
|
+
val.x = __hsub(a.x, b.x);
|
2520
|
+
val.y = __hsub(a.y, b.y);
|
2521
|
+
return val;
|
2522
|
+
)
|
2523
|
+
}
|
2524
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __hmul2(const __half2 a, const __half2 b)
|
2525
|
+
{
|
2526
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2527
|
+
__BINARY_OP_HALF2_MACRO(mul)
|
2528
|
+
,
|
2529
|
+
__half2 val;
|
2530
|
+
val.x = __hmul(a.x, b.x);
|
2531
|
+
val.y = __hmul(a.y, b.y);
|
2532
|
+
return val;
|
2533
|
+
)
|
2534
|
+
}
|
2535
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __hadd2_sat(const __half2 a, const __half2 b)
|
2536
|
+
{
|
2537
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2538
|
+
__BINARY_OP_HALF2_MACRO(add.sat)
|
2539
|
+
,
|
2540
|
+
__half2 val;
|
2541
|
+
val.x = __hadd_sat(a.x, b.x);
|
2542
|
+
val.y = __hadd_sat(a.y, b.y);
|
2543
|
+
return val;
|
2544
|
+
)
|
2545
|
+
}
|
2546
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __hsub2_sat(const __half2 a, const __half2 b)
|
2547
|
+
{
|
2548
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2549
|
+
__BINARY_OP_HALF2_MACRO(sub.sat)
|
2550
|
+
,
|
2551
|
+
__half2 val;
|
2552
|
+
val.x = __hsub_sat(a.x, b.x);
|
2553
|
+
val.y = __hsub_sat(a.y, b.y);
|
2554
|
+
return val;
|
2555
|
+
)
|
2556
|
+
}
|
2557
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __hmul2_sat(const __half2 a, const __half2 b)
|
2558
|
+
{
|
2559
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2560
|
+
__BINARY_OP_HALF2_MACRO(mul.sat)
|
2561
|
+
,
|
2562
|
+
__half2 val;
|
2563
|
+
val.x = __hmul_sat(a.x, b.x);
|
2564
|
+
val.y = __hmul_sat(a.y, b.y);
|
2565
|
+
return val;
|
2566
|
+
)
|
2567
|
+
}
|
2568
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __hadd2_rn(const __half2 a, const __half2 b)
|
2569
|
+
{
|
2570
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2571
|
+
__BINARY_OP_HALF2_MACRO(add.rn)
|
2572
|
+
,
|
2573
|
+
__half2 val;
|
2574
|
+
val.x = __hadd_rn(a.x, b.x);
|
2575
|
+
val.y = __hadd_rn(a.y, b.y);
|
2576
|
+
return val;
|
2577
|
+
)
|
2578
|
+
}
|
2579
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __hsub2_rn(const __half2 a, const __half2 b)
|
2580
|
+
{
|
2581
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2582
|
+
__BINARY_OP_HALF2_MACRO(sub.rn)
|
2583
|
+
,
|
2584
|
+
__half2 val;
|
2585
|
+
val.x = __hsub_rn(a.x, b.x);
|
2586
|
+
val.y = __hsub_rn(a.y, b.y);
|
2587
|
+
return val;
|
2588
|
+
)
|
2589
|
+
}
|
2590
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __hmul2_rn(const __half2 a, const __half2 b)
|
2591
|
+
{
|
2592
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2593
|
+
__BINARY_OP_HALF2_MACRO(mul.rn)
|
2594
|
+
,
|
2595
|
+
__half2 val;
|
2596
|
+
val.x = __hmul_rn(a.x, b.x);
|
2597
|
+
val.y = __hmul_rn(a.y, b.y);
|
2598
|
+
return val;
|
2599
|
+
)
|
2600
|
+
}
|
2601
|
+
#if defined(__CUDACC__) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)) || defined(_NVHPC_CUDA)
|
2602
|
+
__CUDA_FP16_DECL__ __half2 __hfma2(const __half2 a, const __half2 b, const __half2 c)
|
2603
|
+
{
|
2604
|
+
__TERNARY_OP_HALF2_MACRO(fma.rn)
|
2605
|
+
}
|
2606
|
+
__CUDA_FP16_DECL__ __half2 __hfma2_sat(const __half2 a, const __half2 b, const __half2 c)
|
2607
|
+
{
|
2608
|
+
__TERNARY_OP_HALF2_MACRO(fma.rn.sat)
|
2609
|
+
}
|
2610
|
+
#endif /* defined(__CUDACC__) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)) || defined(_NVHPC_CUDA) */
|
2611
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __h2div(const __half2 a, const __half2 b) {
|
2612
|
+
__half ha = __low2half(a);
|
2613
|
+
__half hb = __low2half(b);
|
2614
|
+
|
2615
|
+
const __half v1 = __hdiv(ha, hb);
|
2616
|
+
|
2617
|
+
ha = __high2half(a);
|
2618
|
+
hb = __high2half(b);
|
2619
|
+
|
2620
|
+
const __half v2 = __hdiv(ha, hb);
|
2621
|
+
|
2622
|
+
return __halves2half2(v1, v2);
|
2623
|
+
}
|
2624
|
+
|
2625
|
+
/******************************************************************************
|
2626
|
+
* __half arithmetic *
|
2627
|
+
******************************************************************************/
|
2628
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __hadd(const __half a, const __half b)
|
2629
|
+
{
|
2630
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2631
|
+
__BINARY_OP_HALF_MACRO(add)
|
2632
|
+
,
|
2633
|
+
const float fa = __half2float(a);
|
2634
|
+
const float fb = __half2float(b);
|
2635
|
+
return __float2half(fa + fb);
|
2636
|
+
)
|
2637
|
+
}
|
2638
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __hsub(const __half a, const __half b)
|
2639
|
+
{
|
2640
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2641
|
+
__BINARY_OP_HALF_MACRO(sub)
|
2642
|
+
,
|
2643
|
+
const float fa = __half2float(a);
|
2644
|
+
const float fb = __half2float(b);
|
2645
|
+
return __float2half(fa - fb);
|
2646
|
+
)
|
2647
|
+
}
|
2648
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __hmul(const __half a, const __half b)
|
2649
|
+
{
|
2650
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2651
|
+
__BINARY_OP_HALF_MACRO(mul)
|
2652
|
+
,
|
2653
|
+
const float fa = __half2float(a);
|
2654
|
+
const float fb = __half2float(b);
|
2655
|
+
return __float2half(fa * fb);
|
2656
|
+
)
|
2657
|
+
}
|
2658
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __hadd_sat(const __half a, const __half b)
|
2659
|
+
{
|
2660
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2661
|
+
__BINARY_OP_HALF_MACRO(add.sat)
|
2662
|
+
,
|
2663
|
+
return __hmin(__hmax(__hadd(a, b), CUDART_ZERO_FP16), CUDART_ONE_FP16);
|
2664
|
+
)
|
2665
|
+
}
|
2666
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __hsub_sat(const __half a, const __half b)
|
2667
|
+
{
|
2668
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2669
|
+
__BINARY_OP_HALF_MACRO(sub.sat)
|
2670
|
+
,
|
2671
|
+
return __hmin(__hmax(__hsub(a, b), CUDART_ZERO_FP16), CUDART_ONE_FP16);
|
2672
|
+
)
|
2673
|
+
}
|
2674
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __hmul_sat(const __half a, const __half b)
|
2675
|
+
{
|
2676
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2677
|
+
__BINARY_OP_HALF_MACRO(mul.sat)
|
2678
|
+
,
|
2679
|
+
return __hmin(__hmax(__hmul(a, b), CUDART_ZERO_FP16), CUDART_ONE_FP16);
|
2680
|
+
)
|
2681
|
+
}
|
2682
|
+
|
2683
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __hadd_rn(const __half a, const __half b)
|
2684
|
+
{
|
2685
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2686
|
+
__BINARY_OP_HALF_MACRO(add.rn)
|
2687
|
+
,
|
2688
|
+
const float fa = __half2float(a);
|
2689
|
+
const float fb = __half2float(b);
|
2690
|
+
return __float2half(fa + fb);
|
2691
|
+
)
|
2692
|
+
}
|
2693
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __hsub_rn(const __half a, const __half b)
|
2694
|
+
{
|
2695
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2696
|
+
__BINARY_OP_HALF_MACRO(sub.rn)
|
2697
|
+
,
|
2698
|
+
const float fa = __half2float(a);
|
2699
|
+
const float fb = __half2float(b);
|
2700
|
+
return __float2half(fa - fb);
|
2701
|
+
)
|
2702
|
+
}
|
2703
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __hmul_rn(const __half a, const __half b)
|
2704
|
+
{
|
2705
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
2706
|
+
__BINARY_OP_HALF_MACRO(mul.rn)
|
2707
|
+
,
|
2708
|
+
const float fa = __half2float(a);
|
2709
|
+
const float fb = __half2float(b);
|
2710
|
+
return __float2half(fa * fb);
|
2711
|
+
)
|
2712
|
+
}
|
2713
|
+
#if defined(__CUDACC__) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)) || defined(_NVHPC_CUDA)
|
2714
|
+
__CUDA_FP16_DECL__ __half __hfma(const __half a, const __half b, const __half c)
|
2715
|
+
{
|
2716
|
+
__TERNARY_OP_HALF_MACRO(fma.rn)
|
2717
|
+
}
|
2718
|
+
__CUDA_FP16_DECL__ __half __hfma_sat(const __half a, const __half b, const __half c)
|
2719
|
+
{
|
2720
|
+
__TERNARY_OP_HALF_MACRO(fma.rn.sat)
|
2721
|
+
}
|
2722
|
+
#endif /* defined(__CUDACC__) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)) || defined(_NVHPC_CUDA) */
|
2723
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __hdiv(const __half a, const __half b)
|
2724
|
+
{
|
2725
|
+
NV_IF_ELSE_TARGET(NV_IS_DEVICE,
|
2726
|
+
__half v;
|
2727
|
+
__half abs;
|
2728
|
+
__half den;
|
2729
|
+
__HALF_TO_US(den) = 0x008FU;
|
2730
|
+
|
2731
|
+
float rcp;
|
2732
|
+
const float fa = __half2float(a);
|
2733
|
+
const float fb = __half2float(b);
|
2734
|
+
|
2735
|
+
asm("{rcp.approx.ftz.f32 %0, %1;\n}" :"=f"(rcp) : "f"(fb));
|
2736
|
+
|
2737
|
+
float fv = rcp * fa;
|
2738
|
+
|
2739
|
+
v = __float2half(fv);
|
2740
|
+
abs = __habs(v);
|
2741
|
+
if (__hlt(abs, den) && __hlt(__float2half(0.0f), abs)) {
|
2742
|
+
const float err = __fmaf_rn(-fb, fv, fa);
|
2743
|
+
fv = __fmaf_rn(rcp, err, fv);
|
2744
|
+
v = __float2half(fv);
|
2745
|
+
}
|
2746
|
+
return v;
|
2747
|
+
,
|
2748
|
+
const float fa = __half2float(a);
|
2749
|
+
const float fb = __half2float(b);
|
2750
|
+
return __float2half(fa / fb);
|
2751
|
+
)
|
2752
|
+
}
|
2753
|
+
|
2754
|
+
/******************************************************************************
|
2755
|
+
* __half2 functions *
|
2756
|
+
******************************************************************************/
|
2757
|
+
#if defined(_NVHPC_CUDA) || defined(__CUDACC__)
|
2758
|
+
#define __APPROX_FCAST(fun) /* do */ {\
|
2759
|
+
__half val;\
|
2760
|
+
asm("{.reg.b32 f; \n"\
|
2761
|
+
" .reg.b16 r; \n"\
|
2762
|
+
" mov.b16 r,%1; \n"\
|
2763
|
+
" cvt.f32.f16 f,r; \n"\
|
2764
|
+
" " __CUDA_FP16_STRINGIFY(fun) ".approx.ftz.f32 f,f; \n"\
|
2765
|
+
" cvt.rn.f16.f32 r,f; \n"\
|
2766
|
+
" mov.b16 %0,r; \n"\
|
2767
|
+
"}": "=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)));\
|
2768
|
+
return val;\
|
2769
|
+
} /* while(0) */
|
2770
|
+
#define __APPROX_FCAST2(fun) /* do */ {\
|
2771
|
+
__half2 val;\
|
2772
|
+
asm("{.reg.b16 hl, hu; \n"\
|
2773
|
+
" .reg.b32 fl, fu; \n"\
|
2774
|
+
" mov.b32 {hl, hu}, %1; \n"\
|
2775
|
+
" cvt.f32.f16 fl, hl; \n"\
|
2776
|
+
" cvt.f32.f16 fu, hu; \n"\
|
2777
|
+
" " __CUDA_FP16_STRINGIFY(fun) ".approx.ftz.f32 fl, fl; \n"\
|
2778
|
+
" " __CUDA_FP16_STRINGIFY(fun) ".approx.ftz.f32 fu, fu; \n"\
|
2779
|
+
" cvt.rn.f16.f32 hl, fl; \n"\
|
2780
|
+
" cvt.rn.f16.f32 hu, fu; \n"\
|
2781
|
+
" mov.b32 %0, {hl, hu}; \n"\
|
2782
|
+
"}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a))); \
|
2783
|
+
return val;\
|
2784
|
+
} /* while(0) */
|
2785
|
+
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) || defined(_NVHPC_CUDA)
|
2786
|
+
#define __SPEC_CASE2(i,r, spc, ulp) \
|
2787
|
+
"{.reg.b32 spc, ulp, p;\n"\
|
2788
|
+
" mov.b32 spc," __CUDA_FP16_STRINGIFY(spc) ";\n"\
|
2789
|
+
" mov.b32 ulp," __CUDA_FP16_STRINGIFY(ulp) ";\n"\
|
2790
|
+
" set.eq.f16x2.f16x2 p," __CUDA_FP16_STRINGIFY(i) ", spc;\n"\
|
2791
|
+
" fma.rn.f16x2 " __CUDA_FP16_STRINGIFY(r) ",p,ulp," __CUDA_FP16_STRINGIFY(r) ";\n}\n"
|
2792
|
+
#define __SPEC_CASE(i,r, spc, ulp) \
|
2793
|
+
"{.reg.b16 spc, ulp, p;\n"\
|
2794
|
+
" mov.b16 spc," __CUDA_FP16_STRINGIFY(spc) ";\n"\
|
2795
|
+
" mov.b16 ulp," __CUDA_FP16_STRINGIFY(ulp) ";\n"\
|
2796
|
+
" set.eq.f16.f16 p," __CUDA_FP16_STRINGIFY(i) ", spc;\n"\
|
2797
|
+
" fma.rn.f16 " __CUDA_FP16_STRINGIFY(r) ",p,ulp," __CUDA_FP16_STRINGIFY(r) ";\n}\n"
|
2798
|
+
static __device__ __forceinline__ float __float_simpl_sinf(float a);
|
2799
|
+
static __device__ __forceinline__ float __float_simpl_cosf(float a);
|
2800
|
+
__CUDA_FP16_DECL__ __half hsin(const __half a) {
|
2801
|
+
const float sl = __float_simpl_sinf(__half2float(a));
|
2802
|
+
__half r = __float2half_rn(sl);
|
2803
|
+
asm("{\n\t"
|
2804
|
+
" .reg.b16 i,r,t; \n\t"
|
2805
|
+
" mov.b16 r, %0; \n\t"
|
2806
|
+
" mov.b16 i, %1; \n\t"
|
2807
|
+
" and.b16 t, r, 0x8000U; \n\t"
|
2808
|
+
" abs.f16 r, r; \n\t"
|
2809
|
+
" abs.f16 i, i; \n\t"
|
2810
|
+
__SPEC_CASE(i, r, 0X32B3U, 0x0800U)
|
2811
|
+
__SPEC_CASE(i, r, 0X5CB0U, 0x9000U)
|
2812
|
+
" or.b16 r,r,t; \n\t"
|
2813
|
+
" mov.b16 %0, r; \n"
|
2814
|
+
"}\n" : "+h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a)));
|
2815
|
+
return r;
|
2816
|
+
}
|
2817
|
+
__CUDA_FP16_DECL__ __half2 h2sin(const __half2 a) {
|
2818
|
+
const float sl = __float_simpl_sinf(__half2float(a.x));
|
2819
|
+
const float sh = __float_simpl_sinf(__half2float(a.y));
|
2820
|
+
__half2 r = __floats2half2_rn(sl, sh);
|
2821
|
+
asm("{\n\t"
|
2822
|
+
" .reg.b32 i,r,t; \n\t"
|
2823
|
+
" mov.b32 r, %0; \n\t"
|
2824
|
+
" mov.b32 i, %1; \n\t"
|
2825
|
+
" and.b32 t, r, 0x80008000U; \n\t"
|
2826
|
+
" abs.f16x2 r, r; \n\t"
|
2827
|
+
" abs.f16x2 i, i; \n\t"
|
2828
|
+
__SPEC_CASE2(i, r, 0X32B332B3U, 0x08000800U)
|
2829
|
+
__SPEC_CASE2(i, r, 0X5CB05CB0U, 0x90009000U)
|
2830
|
+
" or.b32 r, r, t; \n\t"
|
2831
|
+
" mov.b32 %0, r; \n"
|
2832
|
+
"}\n" : "+r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a)));
|
2833
|
+
return r;
|
2834
|
+
}
|
2835
|
+
__CUDA_FP16_DECL__ __half hcos(const __half a) {
|
2836
|
+
const float cl = __float_simpl_cosf(__half2float(a));
|
2837
|
+
__half r = __float2half_rn(cl);
|
2838
|
+
asm("{\n\t"
|
2839
|
+
" .reg.b16 i,r; \n\t"
|
2840
|
+
" mov.b16 r, %0; \n\t"
|
2841
|
+
" mov.b16 i, %1; \n\t"
|
2842
|
+
" abs.f16 i, i; \n\t"
|
2843
|
+
__SPEC_CASE(i, r, 0X2B7CU, 0x1000U)
|
2844
|
+
" mov.b16 %0, r; \n"
|
2845
|
+
"}\n" : "+h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a)));
|
2846
|
+
return r;
|
2847
|
+
}
|
2848
|
+
__CUDA_FP16_DECL__ __half2 h2cos(const __half2 a) {
|
2849
|
+
const float cl = __float_simpl_cosf(__half2float(a.x));
|
2850
|
+
const float ch = __float_simpl_cosf(__half2float(a.y));
|
2851
|
+
__half2 r = __floats2half2_rn(cl, ch);
|
2852
|
+
asm("{\n\t"
|
2853
|
+
" .reg.b32 i,r; \n\t"
|
2854
|
+
" mov.b32 r, %0; \n\t"
|
2855
|
+
" mov.b32 i, %1; \n\t"
|
2856
|
+
" abs.f16x2 i, i; \n\t"
|
2857
|
+
__SPEC_CASE2(i, r, 0X2B7C2B7CU, 0x10001000U)
|
2858
|
+
" mov.b32 %0, r; \n"
|
2859
|
+
"}\n" : "+r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a)));
|
2860
|
+
return r;
|
2861
|
+
}
|
2862
|
+
static __device__ __forceinline__ float __internal_trig_reduction_kernel(const float a, unsigned int *const quadrant)
|
2863
|
+
{
|
2864
|
+
const float ar = __fmaf_rn(a, 0.636619772F, 12582912.0F);
|
2865
|
+
const unsigned q = __float_as_uint(ar);
|
2866
|
+
const float j = __fsub_rn(ar, 12582912.0F);
|
2867
|
+
float t = __fmaf_rn(j, -1.5707962512969971e+000F, a);
|
2868
|
+
t = __fmaf_rn(j, -7.5497894158615964e-008F, t);
|
2869
|
+
*quadrant = q;
|
2870
|
+
return t;
|
2871
|
+
}
|
2872
|
+
static __device__ __forceinline__ float __internal_sin_cos_kernel(const float x, const unsigned int i)
|
2873
|
+
{
|
2874
|
+
float z;
|
2875
|
+
const float x2 = x*x;
|
2876
|
+
float a8;
|
2877
|
+
float a6;
|
2878
|
+
float a4;
|
2879
|
+
float a2;
|
2880
|
+
float a1;
|
2881
|
+
float a0;
|
2882
|
+
|
2883
|
+
if ((i & 1U) != 0U) {
|
2884
|
+
// cos
|
2885
|
+
a8 = 2.44331571e-5F;
|
2886
|
+
a6 = -1.38873163e-3F;
|
2887
|
+
a4 = 4.16666457e-2F;
|
2888
|
+
a2 = -5.00000000e-1F;
|
2889
|
+
a1 = x2;
|
2890
|
+
a0 = 1.0F;
|
2891
|
+
}
|
2892
|
+
else {
|
2893
|
+
// sin
|
2894
|
+
a8 = -1.95152959e-4F;
|
2895
|
+
a6 = 8.33216087e-3F;
|
2896
|
+
a4 = -1.66666546e-1F;
|
2897
|
+
a2 = 0.0F;
|
2898
|
+
a1 = x;
|
2899
|
+
a0 = x;
|
2900
|
+
}
|
2901
|
+
|
2902
|
+
z = __fmaf_rn(a8, x2, a6);
|
2903
|
+
z = __fmaf_rn(z, x2, a4);
|
2904
|
+
z = __fmaf_rn(z, x2, a2);
|
2905
|
+
z = __fmaf_rn(z, a1, a0);
|
2906
|
+
|
2907
|
+
if ((i & 2U) != 0U) {
|
2908
|
+
z = -z;
|
2909
|
+
}
|
2910
|
+
return z;
|
2911
|
+
}
|
2912
|
+
static __device__ __forceinline__ float __float_simpl_sinf(float a)
|
2913
|
+
{
|
2914
|
+
float z;
|
2915
|
+
unsigned i;
|
2916
|
+
a = __internal_trig_reduction_kernel(a, &i);
|
2917
|
+
z = __internal_sin_cos_kernel(a, i);
|
2918
|
+
return z;
|
2919
|
+
}
|
2920
|
+
static __device__ __forceinline__ float __float_simpl_cosf(float a)
|
2921
|
+
{
|
2922
|
+
float z;
|
2923
|
+
unsigned i;
|
2924
|
+
a = __internal_trig_reduction_kernel(a, &i);
|
2925
|
+
z = __internal_sin_cos_kernel(a, (i & 0x3U) + 1U);
|
2926
|
+
return z;
|
2927
|
+
}
|
2928
|
+
|
2929
|
+
__CUDA_FP16_DECL__ __half hexp(const __half a) {
|
2930
|
+
__half val;
|
2931
|
+
asm("{.reg.b32 f, C, nZ; \n"
|
2932
|
+
" .reg.b16 h,r; \n"
|
2933
|
+
" mov.b16 h,%1; \n"
|
2934
|
+
" cvt.f32.f16 f,h; \n"
|
2935
|
+
" mov.b32 C, 0x3fb8aa3bU; \n"
|
2936
|
+
" mov.b32 nZ, 0x80000000U;\n"
|
2937
|
+
" fma.rn.f32 f,f,C,nZ; \n"
|
2938
|
+
" ex2.approx.ftz.f32 f,f; \n"
|
2939
|
+
" cvt.rn.f16.f32 r,f; \n"
|
2940
|
+
__SPEC_CASE(h, r, 0X1F79U, 0x9400U)
|
2941
|
+
__SPEC_CASE(h, r, 0X25CFU, 0x9400U)
|
2942
|
+
__SPEC_CASE(h, r, 0XC13BU, 0x0400U)
|
2943
|
+
__SPEC_CASE(h, r, 0XC1EFU, 0x0200U)
|
2944
|
+
" mov.b16 %0,r; \n"
|
2945
|
+
"}": "=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)));
|
2946
|
+
return val;
|
2947
|
+
}
|
2948
|
+
__CUDA_FP16_DECL__ __half2 h2exp(const __half2 a) {
|
2949
|
+
__half2 val;
|
2950
|
+
asm("{.reg.b16 hl, hu; \n"
|
2951
|
+
" .reg.b32 h,r,fl,fu,C,nZ; \n"
|
2952
|
+
" mov.b32 {hl, hu}, %1; \n"
|
2953
|
+
" mov.b32 h, %1; \n"
|
2954
|
+
" cvt.f32.f16 fl, hl; \n"
|
2955
|
+
" cvt.f32.f16 fu, hu; \n"
|
2956
|
+
" mov.b32 C, 0x3fb8aa3bU; \n"
|
2957
|
+
" mov.b32 nZ, 0x80000000U;\n"
|
2958
|
+
" fma.rn.f32 fl,fl,C,nZ; \n"
|
2959
|
+
" fma.rn.f32 fu,fu,C,nZ; \n"
|
2960
|
+
" ex2.approx.ftz.f32 fl, fl; \n"
|
2961
|
+
" ex2.approx.ftz.f32 fu, fu; \n"
|
2962
|
+
" cvt.rn.f16.f32 hl, fl; \n"
|
2963
|
+
" cvt.rn.f16.f32 hu, fu; \n"
|
2964
|
+
" mov.b32 r, {hl, hu}; \n"
|
2965
|
+
__SPEC_CASE2(h, r, 0X1F791F79U, 0x94009400U)
|
2966
|
+
__SPEC_CASE2(h, r, 0X25CF25CFU, 0x94009400U)
|
2967
|
+
__SPEC_CASE2(h, r, 0XC13BC13BU, 0x04000400U)
|
2968
|
+
__SPEC_CASE2(h, r, 0XC1EFC1EFU, 0x02000200U)
|
2969
|
+
" mov.b32 %0, r; \n"
|
2970
|
+
"}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)));
|
2971
|
+
return val;
|
2972
|
+
}
|
2973
|
+
#endif /* !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) || defined(_NVHPC_CUDA) */
|
2974
|
+
|
2975
|
+
__CUDA_FP16_DECL__ __half htanh(const __half a) {
|
2976
|
+
float f = __half2float(a);
|
2977
|
+
f = tanhf(f);
|
2978
|
+
__half h = __float2half_rn(f);
|
2979
|
+
return h;
|
2980
|
+
}
|
2981
|
+
__CUDA_FP16_DECL__ __half2 h2tanh(const __half2 a) {
|
2982
|
+
float2 f = __half22float2(a);
|
2983
|
+
f.x = tanhf(f.x);
|
2984
|
+
f.y = tanhf(f.y);
|
2985
|
+
__half2 h = __float22half2_rn(f);
|
2986
|
+
return h;
|
2987
|
+
}
|
2988
|
+
|
2989
|
+
__CUDA_FP16_DECL__ __half htanh_approx(const __half a) {
|
2990
|
+
__half r;
|
2991
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_75,
|
2992
|
+
__half_raw hr = (__half_raw)a;
|
2993
|
+
asm("tanh.approx.f16 %0, %0;" : "+h"(hr.x));
|
2994
|
+
r = (__half)hr;
|
2995
|
+
,
|
2996
|
+
r = htanh(a);
|
2997
|
+
)
|
2998
|
+
return r;
|
2999
|
+
}
|
3000
|
+
__CUDA_FP16_DECL__ __half2 h2tanh_approx(const __half2 a) {
|
3001
|
+
__half2 res;
|
3002
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_75,
|
3003
|
+
asm("tanh.approx.f16x2 %0, %1;" : "=r"(__HALF2_TO_UI(res)) : "r"(__HALF2_TO_CUI(a)));
|
3004
|
+
,
|
3005
|
+
res = h2tanh(a);
|
3006
|
+
)
|
3007
|
+
return res;
|
3008
|
+
}
|
3009
|
+
|
3010
|
+
__CUDA_FP16_DECL__ __half hexp2(const __half a) {
|
3011
|
+
__half val;
|
3012
|
+
asm("{.reg.b32 f, ULP; \n"
|
3013
|
+
" .reg.b16 r; \n"
|
3014
|
+
" mov.b16 r,%1; \n"
|
3015
|
+
" cvt.f32.f16 f,r; \n"
|
3016
|
+
" ex2.approx.ftz.f32 f,f; \n"
|
3017
|
+
" mov.b32 ULP, 0x33800000U;\n"
|
3018
|
+
" fma.rn.f32 f,f,ULP,f; \n"
|
3019
|
+
" cvt.rn.f16.f32 r,f; \n"
|
3020
|
+
" mov.b16 %0,r; \n"
|
3021
|
+
"}": "=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)));
|
3022
|
+
return val;
|
3023
|
+
}
|
3024
|
+
__CUDA_FP16_DECL__ __half2 h2exp2(const __half2 a) {
|
3025
|
+
__half2 val;
|
3026
|
+
asm("{.reg.b16 hl, hu; \n"
|
3027
|
+
" .reg.b32 fl, fu, ULP; \n"
|
3028
|
+
" mov.b32 {hl, hu}, %1; \n"
|
3029
|
+
" cvt.f32.f16 fl, hl; \n"
|
3030
|
+
" cvt.f32.f16 fu, hu; \n"
|
3031
|
+
" ex2.approx.ftz.f32 fl, fl; \n"
|
3032
|
+
" ex2.approx.ftz.f32 fu, fu; \n"
|
3033
|
+
" mov.b32 ULP, 0x33800000U;\n"
|
3034
|
+
" fma.rn.f32 fl,fl,ULP,fl; \n"
|
3035
|
+
" fma.rn.f32 fu,fu,ULP,fu; \n"
|
3036
|
+
" cvt.rn.f16.f32 hl, fl; \n"
|
3037
|
+
" cvt.rn.f16.f32 hu, fu; \n"
|
3038
|
+
" mov.b32 %0, {hl, hu}; \n"
|
3039
|
+
"}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)));
|
3040
|
+
return val;
|
3041
|
+
}
|
3042
|
+
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) || defined(_NVHPC_CUDA)
|
3043
|
+
__CUDA_FP16_DECL__ __half hexp10(const __half a) {
|
3044
|
+
__half val;
|
3045
|
+
asm("{.reg.b16 h,r; \n"
|
3046
|
+
" .reg.b32 f, C, nZ; \n"
|
3047
|
+
" mov.b16 h, %1; \n"
|
3048
|
+
" cvt.f32.f16 f, h; \n"
|
3049
|
+
" mov.b32 C, 0x40549A78U; \n"
|
3050
|
+
" mov.b32 nZ, 0x80000000U;\n"
|
3051
|
+
" fma.rn.f32 f,f,C,nZ; \n"
|
3052
|
+
" ex2.approx.ftz.f32 f, f; \n"
|
3053
|
+
" cvt.rn.f16.f32 r, f; \n"
|
3054
|
+
__SPEC_CASE(h, r, 0x34DEU, 0x9800U)
|
3055
|
+
__SPEC_CASE(h, r, 0x9766U, 0x9000U)
|
3056
|
+
__SPEC_CASE(h, r, 0x9972U, 0x1000U)
|
3057
|
+
__SPEC_CASE(h, r, 0xA5C4U, 0x1000U)
|
3058
|
+
__SPEC_CASE(h, r, 0xBF0AU, 0x8100U)
|
3059
|
+
" mov.b16 %0, r; \n"
|
3060
|
+
"}":"=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)));
|
3061
|
+
return val;
|
3062
|
+
}
|
3063
|
+
__CUDA_FP16_DECL__ __half2 h2exp10(const __half2 a) {
|
3064
|
+
__half2 val;
|
3065
|
+
asm("{.reg.b16 hl, hu; \n"
|
3066
|
+
" .reg.b32 h,r,fl,fu,C,nZ; \n"
|
3067
|
+
" mov.b32 {hl, hu}, %1; \n"
|
3068
|
+
" mov.b32 h, %1; \n"
|
3069
|
+
" cvt.f32.f16 fl, hl; \n"
|
3070
|
+
" cvt.f32.f16 fu, hu; \n"
|
3071
|
+
" mov.b32 C, 0x40549A78U; \n"
|
3072
|
+
" mov.b32 nZ, 0x80000000U;\n"
|
3073
|
+
" fma.rn.f32 fl,fl,C,nZ; \n"
|
3074
|
+
" fma.rn.f32 fu,fu,C,nZ; \n"
|
3075
|
+
" ex2.approx.ftz.f32 fl, fl; \n"
|
3076
|
+
" ex2.approx.ftz.f32 fu, fu; \n"
|
3077
|
+
" cvt.rn.f16.f32 hl, fl; \n"
|
3078
|
+
" cvt.rn.f16.f32 hu, fu; \n"
|
3079
|
+
" mov.b32 r, {hl, hu}; \n"
|
3080
|
+
__SPEC_CASE2(h, r, 0x34DE34DEU, 0x98009800U)
|
3081
|
+
__SPEC_CASE2(h, r, 0x97669766U, 0x90009000U)
|
3082
|
+
__SPEC_CASE2(h, r, 0x99729972U, 0x10001000U)
|
3083
|
+
__SPEC_CASE2(h, r, 0xA5C4A5C4U, 0x10001000U)
|
3084
|
+
__SPEC_CASE2(h, r, 0xBF0ABF0AU, 0x81008100U)
|
3085
|
+
" mov.b32 %0, r; \n"
|
3086
|
+
"}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)));
|
3087
|
+
return val;
|
3088
|
+
}
|
3089
|
+
__CUDA_FP16_DECL__ __half hlog2(const __half a) {
|
3090
|
+
__half val;
|
3091
|
+
asm("{.reg.b16 h, r; \n"
|
3092
|
+
" .reg.b32 f; \n"
|
3093
|
+
" mov.b16 h, %1; \n"
|
3094
|
+
" cvt.f32.f16 f, h; \n"
|
3095
|
+
" lg2.approx.ftz.f32 f, f; \n"
|
3096
|
+
" cvt.rn.f16.f32 r, f; \n"
|
3097
|
+
__SPEC_CASE(r, r, 0xA2E2U, 0x8080U)
|
3098
|
+
__SPEC_CASE(r, r, 0xBF46U, 0x9400U)
|
3099
|
+
" mov.b16 %0, r; \n"
|
3100
|
+
"}":"=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)));
|
3101
|
+
return val;
|
3102
|
+
}
|
3103
|
+
__CUDA_FP16_DECL__ __half2 h2log2(const __half2 a) {
|
3104
|
+
__half2 val;
|
3105
|
+
asm("{.reg.b16 hl, hu; \n"
|
3106
|
+
" .reg.b32 fl, fu, r, p; \n"
|
3107
|
+
" mov.b32 {hl, hu}, %1; \n"
|
3108
|
+
" cvt.f32.f16 fl, hl; \n"
|
3109
|
+
" cvt.f32.f16 fu, hu; \n"
|
3110
|
+
" lg2.approx.ftz.f32 fl, fl; \n"
|
3111
|
+
" lg2.approx.ftz.f32 fu, fu; \n"
|
3112
|
+
" cvt.rn.f16.f32 hl, fl; \n"
|
3113
|
+
" cvt.rn.f16.f32 hu, fu; \n"
|
3114
|
+
" mov.b32 r, {hl, hu}; \n"
|
3115
|
+
__SPEC_CASE2(r, r, 0xA2E2A2E2U, 0x80808080U)
|
3116
|
+
__SPEC_CASE2(r, r, 0xBF46BF46U, 0x94009400U)
|
3117
|
+
" mov.b32 %0, r; \n"
|
3118
|
+
"}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)));
|
3119
|
+
return val;
|
3120
|
+
}
|
3121
|
+
__CUDA_FP16_DECL__ __half hlog(const __half a) {
|
3122
|
+
__half val;
|
3123
|
+
asm("{.reg.b32 f, C; \n"
|
3124
|
+
" .reg.b16 r,h; \n"
|
3125
|
+
" mov.b16 h,%1; \n"
|
3126
|
+
" cvt.f32.f16 f,h; \n"
|
3127
|
+
" lg2.approx.ftz.f32 f,f; \n"
|
3128
|
+
" mov.b32 C, 0x3f317218U; \n"
|
3129
|
+
" mul.f32 f,f,C; \n"
|
3130
|
+
" cvt.rn.f16.f32 r,f; \n"
|
3131
|
+
__SPEC_CASE(h, r, 0X160DU, 0x9C00U)
|
3132
|
+
__SPEC_CASE(h, r, 0X3BFEU, 0x8010U)
|
3133
|
+
__SPEC_CASE(h, r, 0X3C0BU, 0x8080U)
|
3134
|
+
__SPEC_CASE(h, r, 0X6051U, 0x1C00U)
|
3135
|
+
" mov.b16 %0,r; \n"
|
3136
|
+
"}": "=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)));
|
3137
|
+
return val;
|
3138
|
+
}
|
3139
|
+
__CUDA_FP16_DECL__ __half2 h2log(const __half2 a) {
|
3140
|
+
__half2 val;
|
3141
|
+
asm("{.reg.b16 hl, hu; \n"
|
3142
|
+
" .reg.b32 r, fl, fu, C, h; \n"
|
3143
|
+
" mov.b32 {hl, hu}, %1; \n"
|
3144
|
+
" mov.b32 h, %1; \n"
|
3145
|
+
" cvt.f32.f16 fl, hl; \n"
|
3146
|
+
" cvt.f32.f16 fu, hu; \n"
|
3147
|
+
" lg2.approx.ftz.f32 fl, fl; \n"
|
3148
|
+
" lg2.approx.ftz.f32 fu, fu; \n"
|
3149
|
+
" mov.b32 C, 0x3f317218U; \n"
|
3150
|
+
" mul.f32 fl,fl,C; \n"
|
3151
|
+
" mul.f32 fu,fu,C; \n"
|
3152
|
+
" cvt.rn.f16.f32 hl, fl; \n"
|
3153
|
+
" cvt.rn.f16.f32 hu, fu; \n"
|
3154
|
+
" mov.b32 r, {hl, hu}; \n"
|
3155
|
+
__SPEC_CASE2(h, r, 0X160D160DU, 0x9C009C00U)
|
3156
|
+
__SPEC_CASE2(h, r, 0X3BFE3BFEU, 0x80108010U)
|
3157
|
+
__SPEC_CASE2(h, r, 0X3C0B3C0BU, 0x80808080U)
|
3158
|
+
__SPEC_CASE2(h, r, 0X60516051U, 0x1C001C00U)
|
3159
|
+
" mov.b32 %0, r; \n"
|
3160
|
+
"}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)));
|
3161
|
+
return val;
|
3162
|
+
}
|
3163
|
+
__CUDA_FP16_DECL__ __half hlog10(const __half a) {
|
3164
|
+
__half val;
|
3165
|
+
asm("{.reg.b16 h, r; \n"
|
3166
|
+
" .reg.b32 f, C; \n"
|
3167
|
+
" mov.b16 h, %1; \n"
|
3168
|
+
" cvt.f32.f16 f, h; \n"
|
3169
|
+
" lg2.approx.ftz.f32 f, f; \n"
|
3170
|
+
" mov.b32 C, 0x3E9A209BU; \n"
|
3171
|
+
" mul.f32 f,f,C; \n"
|
3172
|
+
" cvt.rn.f16.f32 r, f; \n"
|
3173
|
+
__SPEC_CASE(h, r, 0x338FU, 0x1000U)
|
3174
|
+
__SPEC_CASE(h, r, 0x33F8U, 0x9000U)
|
3175
|
+
__SPEC_CASE(h, r, 0x57E1U, 0x9800U)
|
3176
|
+
__SPEC_CASE(h, r, 0x719DU, 0x9C00U)
|
3177
|
+
" mov.b16 %0, r; \n"
|
3178
|
+
"}":"=h"(__HALF_TO_US(val)) : "h"(__HALF_TO_CUS(a)));
|
3179
|
+
return val;
|
3180
|
+
}
|
3181
|
+
__CUDA_FP16_DECL__ __half2 h2log10(const __half2 a) {
|
3182
|
+
__half2 val;
|
3183
|
+
asm("{.reg.b16 hl, hu; \n"
|
3184
|
+
" .reg.b32 r, fl, fu, C, h; \n"
|
3185
|
+
" mov.b32 {hl, hu}, %1; \n"
|
3186
|
+
" mov.b32 h, %1; \n"
|
3187
|
+
" cvt.f32.f16 fl, hl; \n"
|
3188
|
+
" cvt.f32.f16 fu, hu; \n"
|
3189
|
+
" lg2.approx.ftz.f32 fl, fl; \n"
|
3190
|
+
" lg2.approx.ftz.f32 fu, fu; \n"
|
3191
|
+
" mov.b32 C, 0x3E9A209BU; \n"
|
3192
|
+
" mul.f32 fl,fl,C; \n"
|
3193
|
+
" mul.f32 fu,fu,C; \n"
|
3194
|
+
" cvt.rn.f16.f32 hl, fl; \n"
|
3195
|
+
" cvt.rn.f16.f32 hu, fu; \n"
|
3196
|
+
" mov.b32 r, {hl, hu}; \n"
|
3197
|
+
__SPEC_CASE2(h, r, 0x338F338FU, 0x10001000U)
|
3198
|
+
__SPEC_CASE2(h, r, 0x33F833F8U, 0x90009000U)
|
3199
|
+
__SPEC_CASE2(h, r, 0x57E157E1U, 0x98009800U)
|
3200
|
+
__SPEC_CASE2(h, r, 0x719D719DU, 0x9C009C00U)
|
3201
|
+
" mov.b32 %0, r; \n"
|
3202
|
+
"}":"=r"(__HALF2_TO_UI(val)) : "r"(__HALF2_TO_CUI(a)));
|
3203
|
+
return val;
|
3204
|
+
}
|
3205
|
+
#undef __SPEC_CASE2
|
3206
|
+
#undef __SPEC_CASE
|
3207
|
+
#endif /* !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) || defined(_NVHPC_CUDA) */
|
3208
|
+
__CUDA_FP16_DECL__ __half2 h2rcp(const __half2 a) {
|
3209
|
+
__APPROX_FCAST2(rcp)
|
3210
|
+
}
|
3211
|
+
__CUDA_FP16_DECL__ __half hrcp(const __half a) {
|
3212
|
+
__APPROX_FCAST(rcp)
|
3213
|
+
}
|
3214
|
+
__CUDA_FP16_DECL__ __half2 h2rsqrt(const __half2 a) {
|
3215
|
+
__APPROX_FCAST2(rsqrt)
|
3216
|
+
}
|
3217
|
+
__CUDA_FP16_DECL__ __half hrsqrt(const __half a) {
|
3218
|
+
__APPROX_FCAST(rsqrt)
|
3219
|
+
}
|
3220
|
+
__CUDA_FP16_DECL__ __half2 h2sqrt(const __half2 a) {
|
3221
|
+
__APPROX_FCAST2(sqrt)
|
3222
|
+
}
|
3223
|
+
__CUDA_FP16_DECL__ __half hsqrt(const __half a) {
|
3224
|
+
__APPROX_FCAST(sqrt)
|
3225
|
+
}
|
3226
|
+
#undef __APPROX_FCAST
|
3227
|
+
#undef __APPROX_FCAST2
|
3228
|
+
#endif /* defined(_NVHPC_CUDA) || defined(__CUDACC__) */
|
3229
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __hisnan2(const __half2 a)
|
3230
|
+
{
|
3231
|
+
__half2 r;
|
3232
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
3233
|
+
asm("{set.nan.f16x2.f16x2 %0,%1,%2;\n}"
|
3234
|
+
:"=r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a)), "r"(__HALF2_TO_CUI(a)));
|
3235
|
+
,
|
3236
|
+
__half2_raw val;
|
3237
|
+
val.x = __hisnan(a.x) ? (unsigned short)0x3C00U : (unsigned short)0U;
|
3238
|
+
val.y = __hisnan(a.y) ? (unsigned short)0x3C00U : (unsigned short)0U;
|
3239
|
+
r = __half2(val);
|
3240
|
+
)
|
3241
|
+
return r;
|
3242
|
+
}
|
3243
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ bool __hisnan(const __half a)
|
3244
|
+
{
|
3245
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
3246
|
+
__half r;
|
3247
|
+
asm("{set.nan.f16.f16 %0,%1,%2;\n}"
|
3248
|
+
:"=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a)), "h"(__HALF_TO_CUS(a)));
|
3249
|
+
return __HALF_TO_CUS(r) != 0U;
|
3250
|
+
,
|
3251
|
+
const __half_raw hr = static_cast<__half_raw>(a);
|
3252
|
+
return ((hr.x & (unsigned short)0x7FFFU) > (unsigned short)0x7C00U);
|
3253
|
+
)
|
3254
|
+
}
|
3255
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __hneg2(const __half2 a)
|
3256
|
+
{
|
3257
|
+
__half2 r;
|
3258
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
3259
|
+
asm("{neg.f16x2 %0,%1;\n}"
|
3260
|
+
:"=r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a)));
|
3261
|
+
,
|
3262
|
+
r.x = __hneg(a.x);
|
3263
|
+
r.y = __hneg(a.y);
|
3264
|
+
)
|
3265
|
+
return r;
|
3266
|
+
}
|
3267
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __hneg(const __half a)
|
3268
|
+
{
|
3269
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
3270
|
+
__half r;
|
3271
|
+
asm("{neg.f16 %0,%1;\n}"
|
3272
|
+
:"=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a)));
|
3273
|
+
return r;
|
3274
|
+
,
|
3275
|
+
const float fa = __half2float(a);
|
3276
|
+
return __float2half(-fa);
|
3277
|
+
)
|
3278
|
+
}
|
3279
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __habs2(const __half2 a)
|
3280
|
+
{
|
3281
|
+
__half2 r;
|
3282
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
3283
|
+
asm("{abs.f16x2 %0,%1;\n}"
|
3284
|
+
:"=r"(__HALF2_TO_UI(r)) : "r"(__HALF2_TO_CUI(a)));
|
3285
|
+
,
|
3286
|
+
r.x = __habs(a.x);
|
3287
|
+
r.y = __habs(a.y);
|
3288
|
+
)
|
3289
|
+
return r;
|
3290
|
+
}
|
3291
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __habs(const __half a)
|
3292
|
+
{
|
3293
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_53,
|
3294
|
+
__half r;
|
3295
|
+
asm("{abs.f16 %0,%1;\n}"
|
3296
|
+
:"=h"(__HALF_TO_US(r)) : "h"(__HALF_TO_CUS(a)));
|
3297
|
+
return r;
|
3298
|
+
,
|
3299
|
+
__half_raw abs_a_raw = static_cast<__half_raw>(a);
|
3300
|
+
abs_a_raw.x &= (unsigned short)0x7FFFU;
|
3301
|
+
if (abs_a_raw.x > (unsigned short)0x7C00U)
|
3302
|
+
{
|
3303
|
+
// return canonical NaN
|
3304
|
+
abs_a_raw.x = (unsigned short)0x7FFFU;
|
3305
|
+
}
|
3306
|
+
return static_cast<__half>(abs_a_raw);
|
3307
|
+
)
|
3308
|
+
}
|
3309
|
+
#if defined(__CUDACC__) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)) || defined(_NVHPC_CUDA)
|
3310
|
+
__CUDA_FP16_DECL__ __half2 __hcmadd(const __half2 a, const __half2 b, const __half2 c)
|
3311
|
+
{
|
3312
|
+
// fast version of complex multiply-accumulate
|
3313
|
+
// (a.re, a.im) * (b.re, b.im) + (c.re, c.im)
|
3314
|
+
// acc.re = (c.re + a.re*b.re) - a.im*b.im
|
3315
|
+
// acc.im = (c.im + a.re*b.im) + a.im*b.re
|
3316
|
+
__half real_tmp = __hfma(a.x, b.x, c.x);
|
3317
|
+
__half img_tmp = __hfma(a.x, b.y, c.y);
|
3318
|
+
real_tmp = __hfma(__hneg(a.y), b.y, real_tmp);
|
3319
|
+
img_tmp = __hfma(a.y, b.x, img_tmp);
|
3320
|
+
return make_half2(real_tmp, img_tmp);
|
3321
|
+
}
|
3322
|
+
#endif /* defined(__CUDACC__) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)) || defined(_NVHPC_CUDA) */
|
3323
|
+
|
3324
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __hmax_nan(const __half a, const __half b)
|
3325
|
+
{
|
3326
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80,
|
3327
|
+
__BINARY_OP_HALF_MACRO(max.NaN)
|
3328
|
+
,
|
3329
|
+
__half maxval;
|
3330
|
+
if (__hisnan(a) || __hisnan(b))
|
3331
|
+
{
|
3332
|
+
maxval = CUDART_NAN_FP16;
|
3333
|
+
}
|
3334
|
+
else
|
3335
|
+
{
|
3336
|
+
maxval = __hmax(a, b);
|
3337
|
+
}
|
3338
|
+
return maxval;
|
3339
|
+
)
|
3340
|
+
}
|
3341
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half __hmin_nan(const __half a, const __half b)
|
3342
|
+
{
|
3343
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80,
|
3344
|
+
__BINARY_OP_HALF_MACRO(min.NaN)
|
3345
|
+
,
|
3346
|
+
__half minval;
|
3347
|
+
if (__hisnan(a) || __hisnan(b))
|
3348
|
+
{
|
3349
|
+
minval = CUDART_NAN_FP16;
|
3350
|
+
}
|
3351
|
+
else
|
3352
|
+
{
|
3353
|
+
minval = __hmin(a, b);
|
3354
|
+
}
|
3355
|
+
return minval;
|
3356
|
+
)
|
3357
|
+
}
|
3358
|
+
|
3359
|
+
#if defined(__CUDACC__) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)) || defined(_NVHPC_CUDA)
|
3360
|
+
__CUDA_FP16_DECL__ __half __hfma_relu(const __half a, const __half b, const __half c)
|
3361
|
+
{
|
3362
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80,
|
3363
|
+
__TERNARY_OP_HALF_MACRO(fma.rn.relu)
|
3364
|
+
,
|
3365
|
+
return __hmax_nan(__hfma(a, b, c), CUDART_ZERO_FP16);
|
3366
|
+
)
|
3367
|
+
}
|
3368
|
+
#endif /* defined(__CUDACC__) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)) || defined(_NVHPC_CUDA) */
|
3369
|
+
|
3370
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __hmax2_nan(const __half2 a, const __half2 b)
|
3371
|
+
{
|
3372
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80,
|
3373
|
+
__BINARY_OP_HALF2_MACRO(max.NaN)
|
3374
|
+
,
|
3375
|
+
__half2 result = __hmax2(a, b);
|
3376
|
+
if (__hisnan(a.x) || __hisnan(b.x))
|
3377
|
+
{
|
3378
|
+
result.x = CUDART_NAN_FP16;
|
3379
|
+
}
|
3380
|
+
if (__hisnan(a.y) || __hisnan(b.y))
|
3381
|
+
{
|
3382
|
+
result.y = CUDART_NAN_FP16;
|
3383
|
+
}
|
3384
|
+
return result;
|
3385
|
+
)
|
3386
|
+
}
|
3387
|
+
__CUDA_HOSTDEVICE_FP16_DECL__ __half2 __hmin2_nan(const __half2 a, const __half2 b)
|
3388
|
+
{
|
3389
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80,
|
3390
|
+
__BINARY_OP_HALF2_MACRO(min.NaN)
|
3391
|
+
,
|
3392
|
+
__half2 result = __hmin2(a, b);
|
3393
|
+
if (__hisnan(a.x) || __hisnan(b.x))
|
3394
|
+
{
|
3395
|
+
result.x = CUDART_NAN_FP16;
|
3396
|
+
}
|
3397
|
+
if (__hisnan(a.y) || __hisnan(b.y))
|
3398
|
+
{
|
3399
|
+
result.y = CUDART_NAN_FP16;
|
3400
|
+
}
|
3401
|
+
return result;
|
3402
|
+
)
|
3403
|
+
}
|
3404
|
+
#if defined(__CUDACC__) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)) || defined(_NVHPC_CUDA)
|
3405
|
+
__CUDA_FP16_DECL__ __half2 __hfma2_relu(const __half2 a, const __half2 b, const __half2 c)
|
3406
|
+
{
|
3407
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80,
|
3408
|
+
__TERNARY_OP_HALF2_MACRO(fma.rn.relu)
|
3409
|
+
,
|
3410
|
+
__half2_raw hzero;
|
3411
|
+
hzero.x = (unsigned short)0U;
|
3412
|
+
hzero.y = (unsigned short)0U;
|
3413
|
+
return __hmax2_nan(__hfma2(a, b, c), __half2(hzero));
|
3414
|
+
)
|
3415
|
+
}
|
3416
|
+
#endif /* defined(__CUDACC__) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)) || defined(_NVHPC_CUDA) */
|
3417
|
+
|
3418
|
+
#if defined(__CUDACC__) || defined(_NVHPC_CUDA)
|
3419
|
+
/* Define __PTR for atomicAdd prototypes below, undef after done */
|
3420
|
+
#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)
|
3421
|
+
#define __PTR "l"
|
3422
|
+
#else
|
3423
|
+
#define __PTR "r"
|
3424
|
+
#endif /*(defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)*/
|
3425
|
+
|
3426
|
+
__CUDA_FP16_DECL__ __half2 atomicAdd(__half2 *const address, const __half2 val) {
|
3427
|
+
NV_IF_ELSE_TARGET(NV_PROVIDES_SM_60,
|
3428
|
+
__half2 r;
|
3429
|
+
asm volatile ("{ atom.add.noftz.f16x2 %0,[%1],%2; }\n"
|
3430
|
+
: "=r"(__HALF2_TO_UI(r)) : __PTR(address), "r"(__HALF2_TO_CUI(val))
|
3431
|
+
: "memory");
|
3432
|
+
return r;
|
3433
|
+
,
|
3434
|
+
unsigned int* address_as_uint = (unsigned int*)address;
|
3435
|
+
unsigned int old = *address_as_uint;
|
3436
|
+
unsigned int assumed;
|
3437
|
+
do {
|
3438
|
+
assumed = old;
|
3439
|
+
__half2 new_val = __hadd2(val, *(__half2*)&assumed);
|
3440
|
+
old = atomicCAS(address_as_uint, assumed, *(unsigned int*)&new_val);
|
3441
|
+
} while (assumed != old);
|
3442
|
+
return *(__half2*)&old;
|
3443
|
+
)
|
3444
|
+
}
|
3445
|
+
|
3446
|
+
#if (defined(__CUDACC__) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700))) || defined(_NVHPC_CUDA)
|
3447
|
+
__CUDA_FP16_DECL__ __half atomicAdd(__half *const address, const __half val) {
|
3448
|
+
__half r;
|
3449
|
+
asm volatile ("{ atom.add.noftz.f16 %0,[%1],%2; }\n"
|
3450
|
+
: "=h"(__HALF_TO_US(r))
|
3451
|
+
: __PTR(address), "h"(__HALF_TO_CUS(val))
|
3452
|
+
: "memory");
|
3453
|
+
return r;
|
3454
|
+
}
|
3455
|
+
#endif /* (defined(__CUDACC__) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700))) || defined(_NVHPC_CUDA) */
|
3456
|
+
|
3457
|
+
#undef __PTR
|
3458
|
+
#endif /* defined(__CUDACC__) || defined(_NVHPC_CUDA) */
|
3459
|
+
#endif /* !(defined __DOXYGEN_ONLY__) */
|
3460
|
+
#endif /* defined(__cplusplus) */
|
3461
|
+
|
3462
|
+
#undef __TERNARY_OP_HALF2_MACRO
|
3463
|
+
#undef __TERNARY_OP_HALF_MACRO
|
3464
|
+
#undef __BINARY_OP_HALF2_MACRO
|
3465
|
+
#undef __BINARY_OP_HALF_MACRO
|
3466
|
+
|
3467
|
+
#undef __CUDA_HOSTDEVICE_FP16_DECL__
|
3468
|
+
#undef __CUDA_FP16_DECL__
|
3469
|
+
|
3470
|
+
#undef __HALF_TO_US
|
3471
|
+
#undef __HALF_TO_CUS
|
3472
|
+
#undef __HALF2_TO_UI
|
3473
|
+
#undef __HALF2_TO_CUI
|
3474
|
+
#undef __CUDA_FP16_CONSTEXPR__
|
3475
|
+
|
3476
|
+
#if defined(__CPP_VERSION_AT_LEAST_11_FP16)
|
3477
|
+
#undef __CPP_VERSION_AT_LEAST_11_FP16
|
3478
|
+
#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP16) */
|
3479
|
+
|
3480
|
+
#undef ___CUDA_FP16_STRINGIFY_INNERMOST
|
3481
|
+
#undef __CUDA_FP16_STRINGIFY
|
3482
|
+
|
3483
|
+
#endif /* end of include guard: __CUDA_FP16_HPP__ */
|