@fugood/llama.node 0.3.2 → 0.3.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (190) hide show
  1. package/CMakeLists.txt +2 -0
  2. package/bin/darwin/arm64/llama-node.node +0 -0
  3. package/bin/darwin/x64/llama-node.node +0 -0
  4. package/bin/linux/arm64/llama-node.node +0 -0
  5. package/bin/linux/x64/llama-node.node +0 -0
  6. package/bin/linux-vulkan/arm64/llama-node.node +0 -0
  7. package/bin/linux-vulkan/x64/llama-node.node +0 -0
  8. package/bin/win32/arm64/llama-node.node +0 -0
  9. package/bin/win32/arm64/node.lib +0 -0
  10. package/bin/win32/x64/llama-node.node +0 -0
  11. package/bin/win32/x64/node.lib +0 -0
  12. package/bin/win32-vulkan/arm64/llama-node.node +0 -0
  13. package/bin/win32-vulkan/arm64/node.lib +0 -0
  14. package/bin/win32-vulkan/x64/llama-node.node +0 -0
  15. package/bin/win32-vulkan/x64/node.lib +0 -0
  16. package/package.json +1 -1
  17. package/src/DetokenizeWorker.cpp +1 -1
  18. package/src/EmbeddingWorker.cpp +2 -2
  19. package/src/LlamaCompletionWorker.cpp +8 -8
  20. package/src/LlamaCompletionWorker.h +2 -2
  21. package/src/LlamaContext.cpp +8 -9
  22. package/src/TokenizeWorker.cpp +1 -1
  23. package/src/common.hpp +4 -4
  24. package/src/llama.cpp/.github/workflows/build.yml +43 -9
  25. package/src/llama.cpp/.github/workflows/docker.yml +3 -0
  26. package/src/llama.cpp/CMakeLists.txt +7 -4
  27. package/src/llama.cpp/cmake/arm64-apple-clang.cmake +16 -0
  28. package/src/llama.cpp/common/CMakeLists.txt +0 -2
  29. package/src/llama.cpp/common/arg.cpp +642 -607
  30. package/src/llama.cpp/common/arg.h +22 -22
  31. package/src/llama.cpp/common/common.cpp +79 -281
  32. package/src/llama.cpp/common/common.h +130 -100
  33. package/src/llama.cpp/common/json-schema-to-grammar.cpp +1 -1
  34. package/src/llama.cpp/common/log.cpp +50 -50
  35. package/src/llama.cpp/common/log.h +18 -18
  36. package/src/llama.cpp/common/ngram-cache.cpp +36 -36
  37. package/src/llama.cpp/common/ngram-cache.h +19 -19
  38. package/src/llama.cpp/common/sampling.cpp +116 -108
  39. package/src/llama.cpp/common/sampling.h +20 -20
  40. package/src/llama.cpp/docs/build.md +37 -17
  41. package/src/llama.cpp/examples/CMakeLists.txt +1 -1
  42. package/src/llama.cpp/examples/batched/batched.cpp +14 -14
  43. package/src/llama.cpp/examples/batched-bench/batched-bench.cpp +10 -11
  44. package/src/llama.cpp/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +1 -1
  45. package/src/llama.cpp/examples/cvector-generator/cvector-generator.cpp +9 -9
  46. package/src/llama.cpp/examples/embedding/embedding.cpp +12 -12
  47. package/src/llama.cpp/examples/eval-callback/eval-callback.cpp +8 -8
  48. package/src/llama.cpp/examples/export-lora/export-lora.cpp +5 -5
  49. package/src/llama.cpp/examples/gen-docs/gen-docs.cpp +7 -7
  50. package/src/llama.cpp/examples/gritlm/gritlm.cpp +18 -18
  51. package/src/llama.cpp/examples/imatrix/imatrix.cpp +20 -11
  52. package/src/llama.cpp/examples/infill/infill.cpp +40 -86
  53. package/src/llama.cpp/examples/llama-bench/llama-bench.cpp +42 -151
  54. package/src/llama.cpp/examples/llama.android/llama/build.gradle.kts +1 -0
  55. package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/llama-android.cpp +11 -14
  56. package/src/llama.cpp/examples/llava/clip.cpp +1 -0
  57. package/src/llama.cpp/examples/llava/llava-cli.cpp +23 -23
  58. package/src/llama.cpp/examples/llava/llava.cpp +37 -3
  59. package/src/llama.cpp/examples/llava/minicpmv-cli.cpp +21 -21
  60. package/src/llama.cpp/examples/lookahead/lookahead.cpp +26 -26
  61. package/src/llama.cpp/examples/lookup/lookup-create.cpp +7 -7
  62. package/src/llama.cpp/examples/lookup/lookup-merge.cpp +4 -4
  63. package/src/llama.cpp/examples/lookup/lookup-stats.cpp +14 -14
  64. package/src/llama.cpp/examples/lookup/lookup.cpp +29 -29
  65. package/src/llama.cpp/examples/main/main.cpp +64 -109
  66. package/src/llama.cpp/examples/parallel/parallel.cpp +18 -19
  67. package/src/llama.cpp/examples/passkey/passkey.cpp +14 -14
  68. package/src/llama.cpp/examples/perplexity/perplexity.cpp +99 -120
  69. package/src/llama.cpp/examples/quantize-stats/quantize-stats.cpp +10 -9
  70. package/src/llama.cpp/examples/retrieval/retrieval.cpp +13 -13
  71. package/src/llama.cpp/examples/rpc/rpc-server.cpp +3 -1
  72. package/src/llama.cpp/examples/save-load-state/save-load-state.cpp +34 -17
  73. package/src/llama.cpp/examples/server/CMakeLists.txt +4 -13
  74. package/src/llama.cpp/examples/server/server.cpp +553 -691
  75. package/src/llama.cpp/examples/server/utils.hpp +312 -25
  76. package/src/llama.cpp/examples/simple/CMakeLists.txt +1 -1
  77. package/src/llama.cpp/examples/simple/simple.cpp +128 -96
  78. package/src/llama.cpp/examples/simple-chat/CMakeLists.txt +5 -0
  79. package/src/llama.cpp/examples/simple-chat/simple-chat.cpp +197 -0
  80. package/src/llama.cpp/examples/speculative/speculative.cpp +54 -51
  81. package/src/llama.cpp/examples/tokenize/tokenize.cpp +2 -2
  82. package/src/llama.cpp/ggml/CMakeLists.txt +15 -9
  83. package/src/llama.cpp/ggml/include/ggml-amx.h +25 -0
  84. package/src/llama.cpp/ggml/include/ggml-backend.h +46 -33
  85. package/src/llama.cpp/ggml/include/ggml-blas.h +5 -3
  86. package/src/llama.cpp/ggml/include/ggml-cann.h +9 -7
  87. package/src/llama.cpp/ggml/include/ggml-cpp.h +38 -0
  88. package/src/llama.cpp/ggml/include/ggml-cpu.h +177 -0
  89. package/src/llama.cpp/ggml/include/ggml-cuda.h +12 -12
  90. package/src/llama.cpp/ggml/include/ggml-kompute.h +7 -3
  91. package/src/llama.cpp/ggml/include/ggml-metal.h +11 -7
  92. package/src/llama.cpp/ggml/include/ggml-opt.h +216 -0
  93. package/src/llama.cpp/ggml/include/ggml-rpc.h +9 -5
  94. package/src/llama.cpp/ggml/include/ggml-sycl.h +18 -11
  95. package/src/llama.cpp/ggml/include/ggml-vulkan.h +10 -8
  96. package/src/llama.cpp/ggml/include/ggml.h +53 -393
  97. package/src/llama.cpp/ggml/src/CMakeLists.txt +66 -1149
  98. package/src/llama.cpp/ggml/src/ggml-aarch64.c +46 -3126
  99. package/src/llama.cpp/ggml/src/ggml-aarch64.h +0 -20
  100. package/src/llama.cpp/ggml/src/ggml-alloc.c +23 -27
  101. package/src/llama.cpp/ggml/src/ggml-amx/CMakeLists.txt +107 -0
  102. package/src/llama.cpp/ggml/src/ggml-amx/common.h +94 -0
  103. package/src/llama.cpp/ggml/src/ggml-amx/ggml-amx.cpp +446 -0
  104. package/src/llama.cpp/ggml/src/ggml-amx/mmq.cpp +2510 -0
  105. package/src/llama.cpp/ggml/src/ggml-amx/mmq.h +17 -0
  106. package/src/llama.cpp/ggml/src/ggml-backend-impl.h +6 -25
  107. package/src/llama.cpp/ggml/src/ggml-backend-reg.cpp +195 -0
  108. package/src/llama.cpp/ggml/src/ggml-backend.cpp +303 -864
  109. package/src/llama.cpp/ggml/src/ggml-blas/CMakeLists.txt +91 -0
  110. package/src/llama.cpp/ggml/src/{ggml-blas.cpp → ggml-blas/ggml-blas.cpp} +213 -65
  111. package/src/llama.cpp/ggml/src/ggml-cann/CMakeLists.txt +46 -0
  112. package/src/llama.cpp/ggml/src/{ggml-cann.cpp → ggml-cann/ggml-cann.cpp} +255 -149
  113. package/src/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt +261 -0
  114. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-aarch64.c +3560 -0
  115. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-aarch64.h +30 -0
  116. package/src/llama.cpp/ggml/src/{ggml-cpu-impl.h → ggml-cpu/ggml-cpu-impl.h} +0 -243
  117. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-quants.c +10822 -0
  118. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-quants.h +63 -0
  119. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c +13970 -0
  120. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.cpp +663 -0
  121. package/src/llama.cpp/ggml/src/{llamafile → ggml-cpu/llamafile}/sgemm.cpp +667 -1
  122. package/src/llama.cpp/ggml/src/ggml-cuda/CMakeLists.txt +155 -0
  123. package/src/llama.cpp/ggml/src/ggml-hip/CMakeLists.txt +106 -0
  124. package/src/llama.cpp/ggml/src/ggml-impl.h +366 -16
  125. package/src/llama.cpp/ggml/src/ggml-kompute/CMakeLists.txt +162 -0
  126. package/src/llama.cpp/ggml/src/{ggml-kompute.cpp → ggml-kompute/ggml-kompute.cpp} +238 -72
  127. package/src/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt +108 -0
  128. package/src/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +249 -0
  129. package/src/llama.cpp/ggml/src/ggml-musa/CMakeLists.txt +100 -0
  130. package/src/llama.cpp/ggml/src/ggml-opt.cpp +867 -0
  131. package/src/llama.cpp/ggml/src/ggml-quants.c +187 -10692
  132. package/src/llama.cpp/ggml/src/ggml-quants.h +78 -125
  133. package/src/llama.cpp/ggml/src/ggml-rpc/CMakeLists.txt +11 -0
  134. package/src/llama.cpp/ggml/src/{ggml-rpc.cpp → ggml-rpc/ggml-rpc.cpp} +475 -300
  135. package/src/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +81 -0
  136. package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +3 -0
  137. package/src/llama.cpp/ggml/src/ggml-sycl/common.cpp +40 -0
  138. package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +258 -0
  139. package/src/llama.cpp/ggml/src/ggml-sycl/concat.cpp +1 -0
  140. package/src/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +2 -22
  141. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +1011 -0
  142. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp +76 -0
  143. package/src/llama.cpp/ggml/src/{ggml-sycl.cpp → ggml-sycl/ggml-sycl.cpp} +3584 -4142
  144. package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +69 -67
  145. package/src/llama.cpp/ggml/src/ggml-sycl/norm.cpp +3 -3
  146. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.cpp +56 -0
  147. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.hpp +11 -0
  148. package/src/llama.cpp/ggml/src/ggml-sycl/presets.hpp +6 -0
  149. package/src/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +4 -4
  150. package/src/llama.cpp/ggml/src/ggml-sycl/wkv6.cpp +138 -0
  151. package/src/llama.cpp/ggml/src/ggml-sycl/wkv6.hpp +10 -0
  152. package/src/llama.cpp/ggml/src/ggml-threading.cpp +12 -0
  153. package/src/llama.cpp/ggml/src/ggml-threading.h +12 -0
  154. package/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +78 -0
  155. package/src/llama.cpp/ggml/src/{ggml-vulkan.cpp → ggml-vulkan/ggml-vulkan.cpp} +555 -623
  156. package/src/llama.cpp/ggml/src/{vulkan-shaders → ggml-vulkan/vulkan-shaders}/vulkan-shaders-gen.cpp +125 -206
  157. package/src/llama.cpp/ggml/src/ggml.c +4032 -19890
  158. package/src/llama.cpp/include/llama.h +67 -33
  159. package/src/llama.cpp/pocs/vdot/q8dot.cpp +4 -3
  160. package/src/llama.cpp/pocs/vdot/vdot.cpp +8 -7
  161. package/src/llama.cpp/src/CMakeLists.txt +2 -1
  162. package/src/llama.cpp/src/llama-sampling.cpp +745 -105
  163. package/src/llama.cpp/src/llama-sampling.h +21 -2
  164. package/src/llama.cpp/src/llama-vocab.cpp +49 -9
  165. package/src/llama.cpp/src/llama-vocab.h +35 -11
  166. package/src/llama.cpp/src/llama.cpp +2636 -2406
  167. package/src/llama.cpp/src/unicode-data.cpp +2 -2
  168. package/src/llama.cpp/tests/CMakeLists.txt +1 -2
  169. package/src/llama.cpp/tests/test-arg-parser.cpp +14 -14
  170. package/src/llama.cpp/tests/test-backend-ops.cpp +185 -60
  171. package/src/llama.cpp/tests/test-barrier.cpp +1 -0
  172. package/src/llama.cpp/tests/test-chat-template.cpp +9 -5
  173. package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +17 -4
  174. package/src/llama.cpp/tests/test-log.cpp +2 -2
  175. package/src/llama.cpp/tests/test-opt.cpp +853 -142
  176. package/src/llama.cpp/tests/test-quantize-fns.cpp +22 -19
  177. package/src/llama.cpp/tests/test-quantize-perf.cpp +16 -14
  178. package/src/llama.cpp/tests/test-rope.cpp +1 -0
  179. package/src/llama.cpp/tests/test-sampling.cpp +162 -137
  180. package/src/llama.cpp/tests/test-tokenizer-0.cpp +7 -7
  181. package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp +5 -5
  182. package/src/llama.cpp/tests/test-tokenizer-1-spm.cpp +5 -5
  183. package/src/llama.cpp/common/train.cpp +0 -1515
  184. package/src/llama.cpp/common/train.h +0 -233
  185. package/src/llama.cpp/examples/baby-llama/CMakeLists.txt +0 -5
  186. package/src/llama.cpp/examples/baby-llama/baby-llama.cpp +0 -1639
  187. package/src/llama.cpp/tests/test-grad0.cpp +0 -1683
  188. /package/src/llama.cpp/ggml/{cmake → src/ggml-cpu/cmake}/FindSIMD.cmake +0 -0
  189. /package/src/llama.cpp/ggml/src/{llamafile → ggml-cpu/llamafile}/sgemm.h +0 -0
  190. /package/src/llama.cpp/ggml/src/{vulkan-shaders → ggml-vulkan/vulkan-shaders}/CMakeLists.txt +0 -0
@@ -0,0 +1,155 @@
1
+ cmake_minimum_required(VERSION 3.18) # for CMAKE_CUDA_ARCHITECTURES
2
+
3
+ find_package(CUDAToolkit)
4
+
5
+ if (CUDAToolkit_FOUND)
6
+ message(STATUS "CUDA Toolkit found")
7
+
8
+ if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
9
+ # native == GPUs available at build time
10
+ # 52 == Maxwell, lowest CUDA 12 standard
11
+ # 60 == P100, FP16 CUDA intrinsics
12
+ # 61 == Pascal, __dp4a instruction (per-byte integer dot product)
13
+ # 70 == V100, FP16 tensor cores
14
+ # 75 == Turing, int8 tensor cores
15
+ if (GGML_NATIVE AND CUDAToolkit_VERSION VERSION_GREATER_EQUAL "11.6")
16
+ set(CMAKE_CUDA_ARCHITECTURES "native")
17
+ elseif(GGML_CUDA_F16 OR GGML_CUDA_DMMV_F16)
18
+ set(CMAKE_CUDA_ARCHITECTURES "60;61;70;75")
19
+ else()
20
+ set(CMAKE_CUDA_ARCHITECTURES "52;61;70;75")
21
+ endif()
22
+ endif()
23
+ message(STATUS "Using CUDA architectures: ${CMAKE_CUDA_ARCHITECTURES}")
24
+
25
+ enable_language(CUDA)
26
+
27
+ file(GLOB GGML_HEADERS_CUDA "*.cuh")
28
+ list(APPEND GGML_HEADERS_CUDA "../../include/ggml-cuda.h")
29
+
30
+ file(GLOB GGML_SOURCES_CUDA "*.cu")
31
+ file(GLOB SRCS "template-instances/fattn-wmma*.cu")
32
+ list(APPEND GGML_SOURCES_CUDA ${SRCS})
33
+ file(GLOB SRCS "template-instances/mmq*.cu")
34
+ list(APPEND GGML_SOURCES_CUDA ${SRCS})
35
+
36
+ if (GGML_CUDA_FA_ALL_QUANTS)
37
+ file(GLOB SRCS "template-instances/fattn-vec*.cu")
38
+ list(APPEND GGML_SOURCES_CUDA ${SRCS})
39
+ add_compile_definitions(GGML_CUDA_FA_ALL_QUANTS)
40
+ else()
41
+ file(GLOB SRCS "template-instances/fattn-vec*q4_0-q4_0.cu")
42
+ list(APPEND GGML_SOURCES_CUDA ${SRCS})
43
+ file(GLOB SRCS "template-instances/fattn-vec*q8_0-q8_0.cu")
44
+ list(APPEND GGML_SOURCES_CUDA ${SRCS})
45
+ file(GLOB SRCS "template-instances/fattn-vec*f16-f16.cu")
46
+ list(APPEND GGML_SOURCES_CUDA ${SRCS})
47
+ endif()
48
+
49
+ add_library(ggml-cuda
50
+ ${GGML_HEADERS_CUDA}
51
+ ${GGML_SOURCES_CUDA}
52
+ )
53
+
54
+ target_link_libraries(ggml-cuda PRIVATE ggml-base)
55
+ target_include_directories(ggml-cuda PRIVATE . ..)
56
+
57
+ add_compile_definitions(GGML_CUDA_PEER_MAX_BATCH_SIZE=${GGML_CUDA_PEER_MAX_BATCH_SIZE})
58
+
59
+ if (GGML_CUDA_GRAPHS)
60
+ add_compile_definitions(GGML_CUDA_USE_GRAPHS)
61
+ endif()
62
+
63
+ if (GGML_CUDA_FORCE_MMQ)
64
+ add_compile_definitions(GGML_CUDA_FORCE_MMQ)
65
+ endif()
66
+
67
+ if (GGML_CUDA_FORCE_CUBLAS)
68
+ add_compile_definitions(GGML_CUDA_FORCE_CUBLAS)
69
+ endif()
70
+
71
+ if (GGML_CUDA_NO_VMM)
72
+ add_compile_definitions(GGML_CUDA_NO_VMM)
73
+ endif()
74
+
75
+ if (GGML_CUDA_F16 OR GGML_CUDA_DMMV_F16)
76
+ add_compile_definitions(GGML_CUDA_F16)
77
+ endif()
78
+
79
+ if (GGML_CUDA_NO_PEER_COPY)
80
+ add_compile_definitions(GGML_CUDA_NO_PEER_COPY)
81
+ endif()
82
+
83
+ if (GGML_STATIC)
84
+ if (WIN32)
85
+ # As of 12.3.1 CUDA Toolkit for Windows does not offer a static cublas library
86
+ target_link_libraries(ggml-cuda PRIVATE CUDA::cudart_static CUDA::cublas CUDA::cublasLt)
87
+ else ()
88
+ target_link_libraries(ggml-cuda PRIVATE CUDA::cudart_static CUDA::cublas_static CUDA::cublasLt_static)
89
+ endif()
90
+ else()
91
+ target_link_libraries(ggml-cuda PRIVATE CUDA::cudart CUDA::cublas CUDA::cublasLt)
92
+ endif()
93
+
94
+ if (GGML_CUDA_NO_VMM)
95
+ # No VMM requested, no need to link directly with the cuda driver lib (libcuda.so)
96
+ else()
97
+ target_link_libraries(ggml-cuda PRIVATE CUDA::cuda_driver)
98
+ endif()
99
+
100
+ set(CUDA_CXX_FLAGS "")
101
+
102
+ set(CUDA_FLAGS -use_fast_math)
103
+
104
+ if (GGML_FATAL_WARNINGS)
105
+ list(APPEND CUDA_FLAGS -Werror all-warnings)
106
+ endif()
107
+
108
+ if (GGML_ALL_WARNINGS AND NOT MSVC)
109
+ set(NVCC_CMD ${CMAKE_CUDA_COMPILER} .c)
110
+ if (NOT CMAKE_CUDA_HOST_COMPILER STREQUAL "")
111
+ list(APPEND NVCC_CMD -ccbin ${CMAKE_CUDA_HOST_COMPILER})
112
+ endif()
113
+
114
+ execute_process(
115
+ COMMAND ${NVCC_CMD} -Xcompiler --version
116
+ OUTPUT_VARIABLE CUDA_CCFULLVER
117
+ ERROR_QUIET
118
+ )
119
+
120
+ if (NOT CUDA_CCFULLVER MATCHES clang)
121
+ set(CUDA_CCID "GNU")
122
+ execute_process(
123
+ COMMAND ${NVCC_CMD} -Xcompiler "-dumpfullversion -dumpversion"
124
+ OUTPUT_VARIABLE CUDA_CCVER
125
+ ERROR_QUIET
126
+ )
127
+ else()
128
+ if (CUDA_CCFULLVER MATCHES Apple)
129
+ set(CUDA_CCID "AppleClang")
130
+ else()
131
+ set(CUDA_CCID "Clang")
132
+ endif()
133
+ string(REGEX REPLACE "^.* version ([0-9.]*).*$" "\\1" CUDA_CCVER ${CUDA_CCFULLVER})
134
+ endif()
135
+
136
+ message("-- CUDA host compiler is ${CUDA_CCID} ${CUDA_CCVER}")
137
+
138
+ get_flags(${CUDA_CCID} ${CUDA_CCVER})
139
+ list(APPEND CUDA_CXX_FLAGS ${CXX_FLAGS} ${GF_CXX_FLAGS}) # This is passed to -Xcompiler later
140
+ endif()
141
+
142
+ if (NOT MSVC)
143
+ list(APPEND CUDA_CXX_FLAGS -Wno-pedantic)
144
+ endif()
145
+
146
+ list(JOIN CUDA_CXX_FLAGS " " CUDA_CXX_FLAGS_JOINED) # pass host compiler flags as a single argument
147
+
148
+ if (NOT CUDA_CXX_FLAGS_JOINED STREQUAL "")
149
+ list(APPEND CUDA_FLAGS -Xcompiler ${CUDA_CXX_FLAGS_JOINED})
150
+ endif()
151
+
152
+ add_compile_options("$<$<COMPILE_LANGUAGE:CUDA>:${CUDA_FLAGS}>")
153
+ else()
154
+ message(FATAL_ERROR "CUDA Toolkit not found")
155
+ endif()
@@ -0,0 +1,106 @@
1
+ if (NOT EXISTS $ENV{ROCM_PATH})
2
+ if (NOT EXISTS /opt/rocm)
3
+ set(ROCM_PATH /usr)
4
+ else()
5
+ set(ROCM_PATH /opt/rocm)
6
+ endif()
7
+ else()
8
+ set(ROCM_PATH $ENV{ROCM_PATH})
9
+ endif()
10
+
11
+ list(APPEND CMAKE_PREFIX_PATH ${ROCM_PATH})
12
+ list(APPEND CMAKE_PREFIX_PATH "${ROCM_PATH}/lib64/cmake")
13
+
14
+ # CMake on Windows doesn't support the HIP language yet
15
+ if (WIN32)
16
+ set(CXX_IS_HIPCC TRUE)
17
+ else()
18
+ string(REGEX MATCH "hipcc(\.bat)?$" CXX_IS_HIPCC "${CMAKE_CXX_COMPILER}")
19
+ endif()
20
+
21
+ if (CXX_IS_HIPCC)
22
+ if (LINUX)
23
+ if (NOT ${CMAKE_CXX_COMPILER_ID} MATCHES "Clang")
24
+ message(WARNING "Only LLVM is supported for HIP, hint: CXX=/opt/rocm/llvm/bin/clang++")
25
+ endif()
26
+
27
+ message(WARNING "Setting hipcc as the C++ compiler is legacy behavior."
28
+ " Prefer setting the HIP compiler directly. See README for details.")
29
+ endif()
30
+ else()
31
+ # Forward AMDGPU_TARGETS to CMAKE_HIP_ARCHITECTURES.
32
+ if (AMDGPU_TARGETS AND NOT CMAKE_HIP_ARCHITECTURES)
33
+ set(CMAKE_HIP_ARCHITECTURES ${AMDGPU_TARGETS})
34
+ endif()
35
+ cmake_minimum_required(VERSION 3.21)
36
+ enable_language(HIP)
37
+ endif()
38
+
39
+ find_package(hip REQUIRED)
40
+ find_package(hipblas REQUIRED)
41
+ find_package(rocblas REQUIRED)
42
+
43
+ message(STATUS "HIP and hipBLAS found")
44
+
45
+ file(GLOB GGML_HEADERS_ROCM "../ggml-cuda/*.cuh")
46
+ list(APPEND GGML_HEADERS_ROCM "../../include/ggml-cuda.h")
47
+
48
+ file(GLOB GGML_SOURCES_ROCM "../ggml-cuda/*.cu")
49
+ file(GLOB SRCS "../ggml-cuda/template-instances/fattn-wmma*.cu")
50
+ list(APPEND GGML_SOURCES_ROCM ${SRCS})
51
+ file(GLOB SRCS "../ggml-cuda/template-instances/mmq*.cu")
52
+ list(APPEND GGML_SOURCES_ROCM ${SRCS})
53
+
54
+ if (GGML_CUDA_FA_ALL_QUANTS)
55
+ file(GLOB SRCS "../ggml-cuda/template-instances/fattn-vec*.cu")
56
+ list(APPEND GGML_SOURCES_ROCM ${SRCS})
57
+ add_compile_definitions(GGML_CUDA_FA_ALL_QUANTS)
58
+ else()
59
+ file(GLOB SRCS "../ggml-cuda/template-instances/fattn-vec*q4_0-q4_0.cu")
60
+ list(APPEND GGML_SOURCES_ROCM ${SRCS})
61
+ file(GLOB SRCS "../ggml-cuda/template-instances/fattn-vec*q8_0-q8_0.cu")
62
+ list(APPEND GGML_SOURCES_ROCM ${SRCS})
63
+ file(GLOB SRCS "../ggml-cuda/template-instances/fattn-vec*f16-f16.cu")
64
+ list(APPEND GGML_SOURCES_ROCM ${SRCS})
65
+ endif()
66
+
67
+ add_library(ggml-hip
68
+ ${GGML_HEADERS_ROCM}
69
+ ${GGML_SOURCES_ROCM})
70
+
71
+ target_link_libraries(ggml-hip PRIVATE ggml-base)
72
+ target_include_directories(ggml-hip PRIVATE . ..)
73
+
74
+ # TODO: do not use CUDA definitions for HIP
75
+ target_compile_definitions(ggml PUBLIC GGML_USE_CUDA)
76
+
77
+ add_compile_definitions(GGML_USE_HIP)
78
+
79
+ if (GGML_HIP_UMA)
80
+ add_compile_definitions(GGML_HIP_UMA)
81
+ endif()
82
+
83
+ if (GGML_CUDA_FORCE_MMQ)
84
+ add_compile_definitions(GGML_CUDA_FORCE_MMQ)
85
+ endif()
86
+
87
+ if (GGML_CUDA_FORCE_CUBLAS)
88
+ add_compile_definitions(GGML_CUDA_FORCE_CUBLAS)
89
+ endif()
90
+
91
+ if (GGML_CUDA_NO_PEER_COPY)
92
+ add_compile_definitions(GGML_CUDA_NO_PEER_COPY)
93
+ endif()
94
+
95
+ if (CXX_IS_HIPCC)
96
+ set_source_files_properties(${GGML_SOURCES_ROCM} PROPERTIES LANGUAGE CXX)
97
+ target_link_libraries(ggml-hip PRIVATE hip::device)
98
+ else()
99
+ set_source_files_properties(${GGML_SOURCES_ROCM} PROPERTIES LANGUAGE HIP)
100
+ endif()
101
+
102
+ if (GGML_STATIC)
103
+ message(FATAL_ERROR "Static linking not supported for HIP/ROCm")
104
+ endif()
105
+
106
+ target_link_libraries(ggml-hip PRIVATE ggml-base hip::host roc::rocblas roc::hipblas)
@@ -3,11 +3,28 @@
3
3
  // GGML internal header
4
4
 
5
5
  #include "ggml.h"
6
-
7
6
  #include <assert.h>
7
+ #include <math.h>
8
8
  #include <stdlib.h> // load `stdlib.h` before other headers to work around MinGW bug: https://sourceforge.net/p/mingw-w64/bugs/192/
9
9
  #include <stdbool.h>
10
10
  #include <stdint.h>
11
+ #include <string.h>
12
+
13
+ #ifdef __ARM_FEATURE_SVE
14
+ #include <arm_sve.h>
15
+ #endif // __ARM_FEATURE_SVE
16
+
17
+ #if defined(__ARM_NEON)
18
+ // if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
19
+ //
20
+ // $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
21
+ //
22
+ #include <arm_neon.h>
23
+ #endif
24
+
25
+ #if defined(__F16C__)
26
+ #include <immintrin.h>
27
+ #endif
11
28
 
12
29
  #ifdef __cplusplus
13
30
  extern "C" {
@@ -19,20 +36,37 @@ extern "C" {
19
36
  #define MIN(a, b) ((a) < (b) ? (a) : (b))
20
37
  #define MAX(a, b) ((a) > (b) ? (a) : (b))
21
38
 
39
+ // required for mmap as gguf only guarantees 32-byte alignment
40
+ #define TENSOR_ALIGNMENT 32
41
+
22
42
  // static_assert should be a #define, but if it's not,
23
43
  // fall back to the _Static_assert C11 keyword.
24
44
  // if C99 - static_assert is noop
25
45
  // ref: https://stackoverflow.com/a/53923785/4039976
26
46
  #ifndef __cplusplus
27
- #ifndef static_assert
28
- #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
29
- #define static_assert(cond, msg) _Static_assert(cond, msg)
30
- #else
31
- #define static_assert(cond, msg) struct global_scope_noop_trick
32
- #endif
33
- #endif
47
+ #ifndef static_assert
48
+ #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
49
+ #define static_assert(cond, msg) _Static_assert(cond, msg)
50
+ #else
51
+ #define static_assert(cond, msg) struct global_scope_noop_trick
52
+ #endif
53
+ #endif
34
54
  #endif
35
55
 
56
+ static inline int ggml_up32(int n) {
57
+ return (n + 31) & ~31;
58
+ }
59
+
60
+ //static inline int ggml_up64(int n) {
61
+ // return (n + 63) & ~63;
62
+ //}
63
+
64
+ static inline int ggml_up(int n, int m) {
65
+ // assert m is a power of 2
66
+ GGML_ASSERT((m & (m - 1)) == 0);
67
+ return (n + m - 1) & ~(m - 1);
68
+ }
69
+
36
70
  //
37
71
  // logging
38
72
  //
@@ -48,6 +82,72 @@ void ggml_log_callback_default(enum ggml_log_level level, const char * text, voi
48
82
  #define GGML_LOG_DEBUG(...) ggml_log_internal(GGML_LOG_LEVEL_DEBUG, __VA_ARGS__)
49
83
  #define GGML_LOG_CONT(...) ggml_log_internal(GGML_LOG_LEVEL_CONT , __VA_ARGS__)
50
84
 
85
+ #define GGML_DEBUG 0
86
+
87
+ #if (GGML_DEBUG >= 1)
88
+ #define GGML_PRINT_DEBUG(...) GGML_LOG_DEBUG(__VA_ARGS__)
89
+ #else
90
+ #define GGML_PRINT_DEBUG(...)
91
+ #endif
92
+
93
+ #if (GGML_DEBUG >= 5)
94
+ #define GGML_PRINT_DEBUG_5(...) GGML_LOG_DEBUG(__VA_ARGS__)
95
+ #else
96
+ #define GGML_PRINT_DEBUG_5(...)
97
+ #endif
98
+
99
+ #if (GGML_DEBUG >= 10)
100
+ #define GGML_PRINT_DEBUG_10(...) GGML_LOG_DEBUG(__VA_ARGS__)
101
+ #else
102
+ #define GGML_PRINT_DEBUG_10(...)
103
+ #endif
104
+
105
+ // tensor params
106
+
107
+ static void ggml_set_op_params(struct ggml_tensor * tensor, const void * params, size_t params_size) {
108
+ GGML_ASSERT(tensor != NULL); // silence -Warray-bounds warnings
109
+ assert(params_size <= GGML_MAX_OP_PARAMS);
110
+ memcpy(tensor->op_params, params, params_size);
111
+ }
112
+
113
+ static int32_t ggml_get_op_params_i32(const struct ggml_tensor * tensor, uint32_t i) {
114
+ assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
115
+ return ((const int32_t *)(tensor->op_params))[i];
116
+ }
117
+
118
+ static float ggml_get_op_params_f32(const struct ggml_tensor * tensor, uint32_t i) {
119
+ assert(i < GGML_MAX_OP_PARAMS / sizeof(float));
120
+ return ((const float *)(tensor->op_params))[i];
121
+ }
122
+
123
+ static void ggml_set_op_params_i32(struct ggml_tensor * tensor, uint32_t i, int32_t value) {
124
+ assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
125
+ ((int32_t *)(tensor->op_params))[i] = value;
126
+ }
127
+
128
+ static void ggml_set_op_params_f32(struct ggml_tensor * tensor, uint32_t i, float value) {
129
+ assert(i < GGML_MAX_OP_PARAMS / sizeof(float));
130
+ ((float *)(tensor->op_params))[i] = value;
131
+ }
132
+
133
+ struct ggml_map_custom1_op_params {
134
+ ggml_custom1_op_t fun;
135
+ int n_tasks;
136
+ void * userdata;
137
+ };
138
+
139
+ struct ggml_map_custom2_op_params {
140
+ ggml_custom2_op_t fun;
141
+ int n_tasks;
142
+ void * userdata;
143
+ };
144
+
145
+ struct ggml_map_custom3_op_params {
146
+ ggml_custom3_op_t fun;
147
+ int n_tasks;
148
+ void * userdata;
149
+ };
150
+
51
151
  // bitset
52
152
 
53
153
  typedef uint32_t ggml_bitset_t;
@@ -96,7 +196,7 @@ void ggml_hash_set_reset(struct ggml_hash_set * hash_set);
96
196
  static bool ggml_hash_contains(const struct ggml_hash_set * hash_set, struct ggml_tensor * key);
97
197
 
98
198
  // returns GGML_HASHSET_FULL if table is full, otherwise the current index of the key or where it should be inserted
99
- static size_t ggml_hash_find(const struct ggml_hash_set * hash_set, struct ggml_tensor * key);
199
+ static size_t ggml_hash_find(const struct ggml_hash_set * hash_set, const struct ggml_tensor * key);
100
200
 
101
201
  // returns GGML_HASHSET_ALREADY_EXISTS if key already exists, index otherwise, asserts if table is full
102
202
  static size_t ggml_hash_insert(struct ggml_hash_set * hash_set, struct ggml_tensor * key);
@@ -110,7 +210,7 @@ static inline size_t ggml_hash(const struct ggml_tensor * p) {
110
210
  return (size_t)(uintptr_t)p >> 4;
111
211
  }
112
212
 
113
- static size_t ggml_hash_find(const struct ggml_hash_set * hash_set, struct ggml_tensor * key) {
213
+ static size_t ggml_hash_find(const struct ggml_hash_set * hash_set, const struct ggml_tensor * key) {
114
214
  size_t h = ggml_hash(key) % hash_set->size;
115
215
 
116
216
  // linear probing
@@ -181,13 +281,14 @@ enum ggml_cgraph_eval_order {
181
281
  };
182
282
 
183
283
  struct ggml_cgraph {
184
- int size;
185
- int n_nodes;
186
- int n_leafs;
284
+ int size; // maximum number of nodes/leafs/grads/grad_accs
285
+ int n_nodes; // number of nodes currently in use
286
+ int n_leafs; // number of leafs currently in use
187
287
 
188
- struct ggml_tensor ** nodes;
189
- struct ggml_tensor ** grads;
190
- struct ggml_tensor ** leafs;
288
+ struct ggml_tensor ** nodes; // tensors with data that can change if the graph is evaluated
289
+ struct ggml_tensor ** grads; // the outputs of these tensors are the gradients of the nodes
290
+ struct ggml_tensor ** grad_accs; // accumulators for node gradients
291
+ struct ggml_tensor ** leafs; // tensors with constant data
191
292
 
192
293
  struct ggml_hash_set visited_hash_set;
193
294
 
@@ -196,6 +297,255 @@ struct ggml_cgraph {
196
297
 
197
298
  struct ggml_cgraph ggml_graph_view(struct ggml_cgraph * cgraph, int i0, int i1);
198
299
 
300
+ // Memory allocation
301
+
302
+ void * ggml_aligned_malloc(size_t size);
303
+ void ggml_aligned_free(void * ptr, size_t size);
304
+
305
+ // FP16 to FP32 conversion
306
+
307
+ #if defined(__ARM_NEON)
308
+ #ifdef _MSC_VER
309
+ typedef uint16_t ggml_fp16_internal_t;
310
+ #else
311
+ typedef __fp16 ggml_fp16_internal_t;
312
+ #endif
313
+ #endif
314
+
315
+ #if defined(__ARM_NEON) && !defined(_MSC_VER)
316
+ #define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
317
+ #define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
318
+
319
+ #define GGML_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
320
+
321
+ static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
322
+ ggml_fp16_internal_t tmp;
323
+ memcpy(&tmp, &h, sizeof(ggml_fp16_t));
324
+ return (float)tmp;
325
+ }
326
+
327
+ static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
328
+ ggml_fp16_t res;
329
+ ggml_fp16_internal_t tmp = f;
330
+ memcpy(&res, &tmp, sizeof(ggml_fp16_t));
331
+ return res;
332
+ }
333
+
334
+ #elif defined(__F16C__)
335
+
336
+ #ifdef _MSC_VER
337
+ #define GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x)))
338
+ #define GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0)
339
+ #else
340
+ #define GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x)
341
+ #define GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0)
342
+ #endif
343
+
344
+ #elif defined(__POWER9_VECTOR__)
345
+
346
+ #define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
347
+ #define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
348
+ /* the inline asm below is about 12% faster than the lookup method */
349
+ #define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x)
350
+ #define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
351
+
352
+ static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
353
+ register float f;
354
+ register double d;
355
+ __asm__(
356
+ "mtfprd %0,%2\n"
357
+ "xscvhpdp %0,%0\n"
358
+ "frsp %1,%0\n" :
359
+ /* temp */ "=d"(d),
360
+ /* out */ "=f"(f):
361
+ /* in */ "r"(h));
362
+ return f;
363
+ }
364
+
365
+ static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
366
+ register double d;
367
+ register ggml_fp16_t r;
368
+ __asm__( /* xscvdphp can work on double or single precision */
369
+ "xscvdphp %0,%2\n"
370
+ "mffprd %1,%0\n" :
371
+ /* temp */ "=d"(d),
372
+ /* out */ "=r"(r):
373
+ /* in */ "f"(f));
374
+ return r;
375
+ }
376
+
377
+ #else
378
+
379
+ // FP16 <-> FP32
380
+ // ref: https://github.com/Maratyszcza/FP16
381
+
382
+ static inline float fp32_from_bits(uint32_t w) {
383
+ union {
384
+ uint32_t as_bits;
385
+ float as_value;
386
+ } fp32;
387
+ fp32.as_bits = w;
388
+ return fp32.as_value;
389
+ }
390
+
391
+ static inline uint32_t fp32_to_bits(float f) {
392
+ union {
393
+ float as_value;
394
+ uint32_t as_bits;
395
+ } fp32;
396
+ fp32.as_value = f;
397
+ return fp32.as_bits;
398
+ }
399
+
400
+ static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
401
+ const uint32_t w = (uint32_t) h << 16;
402
+ const uint32_t sign = w & UINT32_C(0x80000000);
403
+ const uint32_t two_w = w + w;
404
+
405
+ const uint32_t exp_offset = UINT32_C(0xE0) << 23;
406
+ #if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)) && (!defined(__cplusplus) || __cplusplus >= 201703L)
407
+ const float exp_scale = 0x1.0p-112f;
408
+ #else
409
+ const float exp_scale = fp32_from_bits(UINT32_C(0x7800000));
410
+ #endif
411
+ const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;
412
+
413
+ const uint32_t magic_mask = UINT32_C(126) << 23;
414
+ const float magic_bias = 0.5f;
415
+ const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;
416
+
417
+ const uint32_t denormalized_cutoff = UINT32_C(1) << 27;
418
+ const uint32_t result = sign |
419
+ (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value));
420
+ return fp32_from_bits(result);
421
+ }
422
+
423
+ static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
424
+ #if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)) && (!defined(__cplusplus) || __cplusplus >= 201703L)
425
+ const float scale_to_inf = 0x1.0p+112f;
426
+ const float scale_to_zero = 0x1.0p-110f;
427
+ #else
428
+ const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000));
429
+ const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
430
+ #endif
431
+ float base = (fabsf(f) * scale_to_inf) * scale_to_zero;
432
+
433
+ const uint32_t w = fp32_to_bits(f);
434
+ const uint32_t shl1_w = w + w;
435
+ const uint32_t sign = w & UINT32_C(0x80000000);
436
+ uint32_t bias = shl1_w & UINT32_C(0xFF000000);
437
+ if (bias < UINT32_C(0x71000000)) {
438
+ bias = UINT32_C(0x71000000);
439
+ }
440
+
441
+ base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
442
+ const uint32_t bits = fp32_to_bits(base);
443
+ const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
444
+ const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
445
+ const uint32_t nonsign = exp_bits + mantissa_bits;
446
+ return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
447
+ }
448
+
449
+ #define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
450
+ #define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
451
+
452
+ #endif // defined(__ARM_NEON) && !defined(_MSC_VER)
453
+
454
+ // precomputed f32 table for f16 (256 KB)
455
+ // defined in ggml.c, initialized in ggml_init()
456
+ GGML_API float ggml_table_f32_f16[1 << 16];
457
+
458
+ // On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_lookup_fp16_to_fp32,
459
+ // so we define GGML_FP16_TO_FP32 and GGML_FP32_TO_FP16 elsewhere for NEON.
460
+ // This is also true for POWER9.
461
+ #if !defined(GGML_FP16_TO_FP32)
462
+ inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) {
463
+ uint16_t s;
464
+ memcpy(&s, &f, sizeof(uint16_t));
465
+ return ggml_table_f32_f16[s];
466
+ }
467
+
468
+ #define GGML_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x)
469
+ #endif
470
+
471
+ #if !defined(GGML_FP32_TO_FP16)
472
+ #define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
473
+ #endif
474
+
475
+ /**
476
+ * Converts brain16 to float32.
477
+ *
478
+ * The bfloat16 floating point format has the following structure:
479
+ *
480
+ * ┌sign
481
+ * │
482
+ * │ ┌exponent
483
+ * │ │
484
+ * │ │ ┌mantissa
485
+ * │ │ │
486
+ * │┌──┴───┐┌─┴───┐
487
+ * 0b0000000000000000 brain16
488
+ *
489
+ * Since bf16 has the same number of exponent bits as a 32bit float,
490
+ * encoding and decoding numbers becomes relatively straightforward.
491
+ *
492
+ * ┌sign
493
+ * │
494
+ * │ ┌exponent
495
+ * │ │
496
+ * │ │ ┌mantissa
497
+ * │ │ │
498
+ * │┌──┴───┐┌─┴───────────────────┐
499
+ * 0b00000000000000000000000000000000 IEEE binary32
500
+ *
501
+ * For comparison, the standard fp16 format has fewer exponent bits.
502
+ *
503
+ * ┌sign
504
+ * │
505
+ * │ ┌exponent
506
+ * │ │
507
+ * │ │ ┌mantissa
508
+ * │ │ │
509
+ * │┌─┴─┐┌─┴──────┐
510
+ * 0b0000000000000000 IEEE binary16
511
+ *
512
+ * @see IEEE 754-2008
513
+ */
514
+ static inline float ggml_compute_bf16_to_fp32(ggml_bf16_t h) {
515
+ union {
516
+ float f;
517
+ uint32_t i;
518
+ } u;
519
+ u.i = (uint32_t)h.bits << 16;
520
+ return u.f;
521
+ }
522
+
523
+ /**
524
+ * Converts float32 to brain16.
525
+ *
526
+ * This is binary identical with Google Brain float conversion.
527
+ * Floats shall round to nearest even, and NANs shall be quiet.
528
+ * Subnormals aren't flushed to zero, except perhaps when used.
529
+ * This code should vectorize nicely if using modern compilers.
530
+ */
531
+ static inline ggml_bf16_t ggml_compute_fp32_to_bf16(float s) {
532
+ ggml_bf16_t h;
533
+ union {
534
+ float f;
535
+ uint32_t i;
536
+ } u;
537
+ u.f = s;
538
+ if ((u.i & 0x7fffffff) > 0x7f800000) { /* nan */
539
+ h.bits = (u.i >> 16) | 64; /* force to quiet */
540
+ return h;
541
+ }
542
+ h.bits = (u.i + (0x7fff + ((u.i >> 16) & 1))) >> 16;
543
+ return h;
544
+ }
545
+
546
+ #define GGML_FP32_TO_BF16(x) ggml_compute_fp32_to_bf16(x)
547
+ #define GGML_BF16_TO_FP32(x) ggml_compute_bf16_to_fp32(x)
548
+
199
549
  #ifdef __cplusplus
200
550
  }
201
551
  #endif