cactus-react-native 0.0.1 → 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE.txt +20 -0
- package/README.md +3 -1
- package/android/src/main/CMakeLists.txt +60 -21
- package/android/src/main/java/com/cactus/Cactus.java +465 -0
- package/android/src/main/java/com/cactus/LlamaContext.java +199 -0
- package/android/src/main/jni.cpp +325 -10
- package/android/src/main/jniLibs/arm64-v8a/libcactus.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/libcactus_v8.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/libcactus_v8_2.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/libcactus_v8_2_dotprod.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/libcactus_v8_2_dotprod_i8mm.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/libcactus_v8_2_i8mm.so +0 -0
- package/android/src/main/jniLibs/x86_64/libcactus.so +0 -0
- package/android/src/main/jniLibs/x86_64/libcactus_x86_64.so +0 -0
- package/android/src/newarch/java/com/cactus/CactusModule.java +79 -7
- package/android/src/oldarch/java/com/cactus/CactusModule.java +70 -0
- package/cactus-react-native.podspec +0 -3
- package/ios/CMakeLists.txt +56 -36
- package/ios/Cactus.mm +243 -2
- package/ios/CactusContext.h +22 -0
- package/ios/CactusContext.mm +176 -1
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/cactus.h +92 -5
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/cactus_ffi.h +229 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/chat.h +2 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/common.h +42 -51
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-backend.h +4 -4
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-common.h +12 -6
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-cpp.h +1 -1
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-cpu.h +5 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-impl.h +52 -18
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-metal-impl.h +106 -14
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-opt.h +49 -28
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml.h +87 -106
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-arch.h +16 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-batch.h +2 -1
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-chat.h +7 -2
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-context.h +44 -33
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-cparams.h +1 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-graph.h +83 -17
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-hparams.h +44 -2
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-kv-cache.h +407 -179
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-memory.h +13 -2
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-model-loader.h +5 -3
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-model-saver.h +37 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-model.h +24 -2
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-vocab.h +6 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama.h +102 -142
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/minja/chat-template.hpp +23 -11
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/minja/minja.hpp +186 -127
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Info.plist +0 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/cactus +0 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/ggml-llama.metallib +0 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/cactus.h +92 -5
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/cactus_ffi.h +229 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/chat.h +2 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/common.h +42 -51
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-backend.h +4 -4
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-common.h +12 -6
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpp.h +1 -1
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu.h +5 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-impl.h +52 -18
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-metal-impl.h +106 -14
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-opt.h +49 -28
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml.h +87 -106
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-arch.h +16 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-batch.h +2 -1
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-chat.h +7 -2
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-context.h +44 -33
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-cparams.h +1 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-graph.h +83 -17
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-hparams.h +44 -2
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-kv-cache.h +407 -179
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-memory.h +13 -2
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-model-loader.h +5 -3
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-model-saver.h +37 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-model.h +24 -2
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-vocab.h +6 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama.h +102 -142
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/minja/chat-template.hpp +23 -11
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/minja/minja.hpp +186 -127
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Info.plist +0 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/_CodeSignature/CodeResources +1 -1
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/cactus +0 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/ggml-llama-sim.metallib +0 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/cactus.h +92 -5
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/cactus_ffi.h +229 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/chat.h +2 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/common.h +42 -51
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-backend.h +4 -4
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-common.h +12 -6
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-cpp.h +1 -1
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-cpu.h +5 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-impl.h +52 -18
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-metal-impl.h +106 -14
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-opt.h +49 -28
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml.h +87 -106
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-arch.h +16 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-batch.h +2 -1
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-chat.h +7 -2
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-context.h +44 -33
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-cparams.h +1 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-graph.h +83 -17
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-hparams.h +44 -2
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-kv-cache.h +407 -179
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-memory.h +13 -2
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-model-loader.h +5 -3
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-model-saver.h +37 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-model.h +24 -2
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-vocab.h +6 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama.h +102 -142
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/minja/chat-template.hpp +23 -11
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/minja/minja.hpp +186 -127
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Info.plist +0 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/cactus +0 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/ggml-llama.metallib +0 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/cactus.h +92 -5
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/cactus_ffi.h +229 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/chat.h +2 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/common.h +42 -51
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-backend.h +4 -4
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-common.h +12 -6
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpp.h +1 -1
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu.h +5 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-impl.h +52 -18
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-metal-impl.h +106 -14
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-opt.h +49 -28
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml.h +87 -106
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-arch.h +16 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-batch.h +2 -1
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-chat.h +7 -2
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-context.h +44 -33
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-cparams.h +1 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-graph.h +83 -17
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-hparams.h +44 -2
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-kv-cache.h +407 -179
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-memory.h +13 -2
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-model-loader.h +5 -3
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-model-saver.h +37 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-model.h +24 -2
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-vocab.h +6 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama.h +102 -142
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/minja/chat-template.hpp +23 -11
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/minja/minja.hpp +186 -127
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Info.plist +0 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/_CodeSignature/CodeResources +1 -1
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/cactus +0 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/ggml-llama-sim.metallib +0 -0
- package/lib/commonjs/NativeCactus.js +1 -0
- package/lib/commonjs/NativeCactus.js.map +1 -1
- package/lib/commonjs/index.js +112 -0
- package/lib/commonjs/index.js.map +1 -1
- package/lib/commonjs/tools.js +118 -0
- package/lib/commonjs/tools.js.map +1 -0
- package/lib/module/NativeCactus.js +3 -0
- package/lib/module/NativeCactus.js.map +1 -1
- package/lib/module/index.js +87 -1
- package/lib/module/index.js.map +1 -1
- package/lib/module/tools.js +110 -0
- package/lib/module/tools.js.map +1 -0
- package/lib/typescript/NativeCactus.d.ts +30 -1
- package/lib/typescript/NativeCactus.d.ts.map +1 -1
- package/lib/typescript/index.d.ts +21 -2
- package/lib/typescript/index.d.ts.map +1 -1
- package/lib/typescript/tools.d.ts +38 -0
- package/lib/typescript/tools.d.ts.map +1 -0
- package/package.json +6 -3
- package/src/NativeCactus.ts +62 -1
- package/src/index.ts +113 -2
- package/src/tools.ts +127 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-cpu-aarch64.h +0 -8
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-cpu-impl.h +0 -531
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-cpu-quants.h +0 -63
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-cpu-traits.h +0 -38
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/sgemm.h +0 -14
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-aarch64.h +0 -8
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-impl.h +0 -531
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-quants.h +0 -63
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-traits.h +0 -38
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/sgemm.h +0 -14
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-cpu-aarch64.h +0 -8
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-cpu-impl.h +0 -531
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-cpu-quants.h +0 -63
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-cpu-traits.h +0 -38
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/sgemm.h +0 -14
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-aarch64.h +0 -8
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-impl.h +0 -531
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-quants.h +0 -63
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-traits.h +0 -38
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/sgemm.h +0 -14
package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-backend.h
CHANGED
@@ -38,7 +38,7 @@ extern "C" {
     LM_GGML_API lm_ggml_backend_buffer_t lm_ggml_backend_buft_alloc_buffer (lm_ggml_backend_buffer_type_t buft, size_t size);
     LM_GGML_API size_t lm_ggml_backend_buft_get_alignment (lm_ggml_backend_buffer_type_t buft);
     LM_GGML_API size_t lm_ggml_backend_buft_get_max_size (lm_ggml_backend_buffer_type_t buft);
-    LM_GGML_API size_t lm_ggml_backend_buft_get_alloc_size(lm_ggml_backend_buffer_type_t buft, struct lm_ggml_tensor * tensor);
+    LM_GGML_API size_t lm_ggml_backend_buft_get_alloc_size(lm_ggml_backend_buffer_type_t buft, const struct lm_ggml_tensor * tensor);
     LM_GGML_API bool lm_ggml_backend_buft_is_host (lm_ggml_backend_buffer_type_t buft);
     LM_GGML_API lm_ggml_backend_dev_t lm_ggml_backend_buft_get_device (lm_ggml_backend_buffer_type_t buft);

@@ -59,7 +59,7 @@ extern "C" {
     LM_GGML_API enum lm_ggml_status lm_ggml_backend_buffer_init_tensor (lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor);
     LM_GGML_API size_t lm_ggml_backend_buffer_get_alignment (lm_ggml_backend_buffer_t buffer);
     LM_GGML_API size_t lm_ggml_backend_buffer_get_max_size (lm_ggml_backend_buffer_t buffer);
-    LM_GGML_API size_t lm_ggml_backend_buffer_get_alloc_size(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor);
+    LM_GGML_API size_t lm_ggml_backend_buffer_get_alloc_size(lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * tensor);
     LM_GGML_API void lm_ggml_backend_buffer_clear (lm_ggml_backend_buffer_t buffer, uint8_t value);
     LM_GGML_API bool lm_ggml_backend_buffer_is_host (lm_ggml_backend_buffer_t buffer);
     LM_GGML_API void lm_ggml_backend_buffer_set_usage (lm_ggml_backend_buffer_t buffer, enum lm_ggml_backend_buffer_usage usage);
@@ -248,7 +248,7 @@ extern "C" {
     // preferrably to run on the same backend as the buffer
     lm_ggml_backend_buffer_set_usage(buf_weights, LM_GGML_BACKEND_BUFFER_USAGE_WEIGHTS);

-    sched = lm_ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, NULL, num_backends, LM_GGML_DEFAULT_GRAPH_SIZE, false);
+    sched = lm_ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, NULL, num_backends, LM_GGML_DEFAULT_GRAPH_SIZE, false, true);

     // initialize buffers from a max size graph (optional)
     reserve_graph = build_graph(sched, max_batch_size);
@@ -289,7 +289,7 @@ extern "C" {
     typedef bool (*lm_ggml_backend_sched_eval_callback)(struct lm_ggml_tensor * t, bool ask, void * user_data);

     // Initialize a backend scheduler, backends with low index are given priority over backends with high index
-    LM_GGML_API lm_ggml_backend_sched_t lm_ggml_backend_sched_new(lm_ggml_backend_t * backends, lm_ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel);
+    LM_GGML_API lm_ggml_backend_sched_t lm_ggml_backend_sched_new(lm_ggml_backend_t * backends, lm_ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel, bool op_offload);
     LM_GGML_API void lm_ggml_backend_sched_free(lm_ggml_backend_sched_t sched);

     // Initialize backend buffers from a measure graph
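The breaking change in this header is the extra trailing op_offload flag on lm_ggml_backend_sched_new (the const-qualification of the tensor arguments is source-compatible for callers). Below is a minimal sketch of an updated call site, assuming a single CPU backend created with lm_ggml_backend_cpu_init(); the setup around the call is illustrative, not part of this diff.

#include "ggml.h"
#include "ggml-backend.h"
#include "ggml-cpu.h"

int main() {
    // one CPU backend; real apps would put Metal/GPU backends first
    lm_ggml_backend_t backends[1] = { lm_ggml_backend_cpu_init() };

    // 0.0.1: (..., /*parallel=*/false)
    // 0.1.0: (..., /*parallel=*/false, /*op_offload=*/true), new trailing flag
    lm_ggml_backend_sched_t sched = lm_ggml_backend_sched_new(
        backends, /*bufts=*/NULL, /*n_backends=*/1,
        LM_GGML_DEFAULT_GRAPH_SIZE, /*parallel=*/false, /*op_offload=*/true);

    lm_ggml_backend_sched_free(sched);
    lm_ggml_backend_free(backends[0]);
    return 0;
}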
package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-common.h
CHANGED
@@ -158,6 +158,12 @@ typedef sycl::half2 lm_ggml_half2;

 #endif // LM_GGML_COMMON_DECL_CUDA || LM_GGML_COMMON_DECL_HIP

+#ifdef _MSC_VER
+#define LM_GGML_EXTENSION
+#else // _MSC_VER
+#define LM_GGML_EXTENSION __extension__
+#endif // _MSC_VER
+
 #define QK4_0 32
 typedef struct {
     lm_ggml_half d; // delta
@@ -167,7 +173,7 @@ static_assert(sizeof(block_q4_0) == sizeof(lm_ggml_half) + QK4_0 / 2, "wrong q4_

 #define QK4_1 32
 typedef struct {
-    union {
+    LM_GGML_EXTENSION union {
         struct {
             lm_ggml_half d; // delta
             lm_ggml_half m; // min
@@ -188,7 +194,7 @@ static_assert(sizeof(block_q5_0) == sizeof(lm_ggml_half) + sizeof(uint32_t) + QK

 #define QK5_1 32
 typedef struct {
-    union {
+    LM_GGML_EXTENSION union {
         struct {
             lm_ggml_half d; // delta
             lm_ggml_half m; // min
@@ -209,7 +215,7 @@ static_assert(sizeof(block_q8_0) == sizeof(lm_ggml_half) + QK8_0, "wrong q8_0 bl

 #define QK8_1 32
 typedef struct {
-    union {
+    LM_GGML_EXTENSION union {
         struct {
             lm_ggml_half d; // delta
             lm_ggml_half s; // d * sum(qs[i])
@@ -250,7 +256,7 @@ static_assert(sizeof(block_tq2_0) == sizeof(lm_ggml_half) + QK_K / 4, "wrong tq2
 typedef struct {
     uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits
     uint8_t qs[QK_K/4]; // quants
-    union {
+    LM_GGML_EXTENSION union {
         struct {
             lm_ggml_half d; // super-block scale for quantized scales
             lm_ggml_half dmin; // super-block scale for quantized mins
@@ -277,7 +283,7 @@ static_assert(sizeof(block_q3_K) == sizeof(lm_ggml_half) + QK_K / 4 + QK_K / 8 +
 // weight is represented as x = a * q + b
 // Effectively 4.5 bits per weight
 typedef struct {
-    union {
+    LM_GGML_EXTENSION union {
         struct {
             lm_ggml_half d; // super-block scale for quantized scales
             lm_ggml_half dmin; // super-block scale for quantized mins
@@ -294,7 +300,7 @@ static_assert(sizeof(block_q4_K) == 2*sizeof(lm_ggml_half) + K_SCALE_SIZE + QK_K
 // weight is represented as x = a * q + b
 // Effectively 5.5 bits per weight
 typedef struct {
-    union {
+    LM_GGML_EXTENSION union {
         struct {
             lm_ggml_half d; // super-block scale for quantized scales
             lm_ggml_half dmin; // super-block scale for quantized mins
package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpp.h
CHANGED
@@ -24,7 +24,7 @@ typedef std::unique_ptr<lm_gguf_context, lm_gguf_context_deleter> lm_gguf_contex

 struct lm_ggml_gallocr_deleter { void operator()(lm_ggml_gallocr_t galloc) { lm_ggml_gallocr_free(galloc); } };

-typedef std::unique_ptr<
+typedef std::unique_ptr<lm_ggml_gallocr, lm_ggml_gallocr_deleter> lm_ggml_gallocr_ptr;

 // ggml-backend
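The rewritten typedef matters because lm_ggml_gallocr_t is itself a pointer; parameterizing std::unique_ptr on the pointee type lm_ggml_gallocr makes lm_ggml_gallocr_ptr::get() return the same lm_ggml_gallocr_t the C API expects. A hedged usage sketch (the buffer-type choice is illustrative):

#include "ggml-alloc.h"
#include "ggml-backend.h"
#include "ggml-cpu.h"
#include "ggml-cpp.h"

void demo() {
    // RAII ownership: lm_ggml_gallocr_free is invoked by the deleter
    lm_ggml_gallocr_ptr galloc(lm_ggml_gallocr_new(lm_ggml_backend_cpu_buffer_type()));
    // ... reserve/allocate graphs through galloc.get() ...
}   // freed automatically here, even on early return or exception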
package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu.h
CHANGED
@@ -133,6 +133,11 @@ extern "C" {

     LM_GGML_BACKEND_API lm_ggml_backend_reg_t lm_ggml_backend_cpu_reg(void);

+    LM_GGML_BACKEND_API void lm_ggml_cpu_fp32_to_fp16(const float *, lm_ggml_fp16_t *, int64_t);
+    LM_GGML_BACKEND_API void lm_ggml_cpu_fp16_to_fp32(const lm_ggml_fp16_t *, float *, int64_t);
+    LM_GGML_BACKEND_API void lm_ggml_cpu_fp32_to_bf16(const float *, lm_ggml_bf16_t *, int64_t);
+    LM_GGML_BACKEND_API void lm_ggml_cpu_bf16_to_fp32(const lm_ggml_bf16_t *, float *, int64_t);
+
 #ifdef __cplusplus
 }
 #endif
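The four new declarations expose the CPU backend's bulk FP16/BF16 converters. A small round-trip sketch; the sample values are chosen to be exactly representable in half precision:

#include <cstdio>
#include "ggml-cpu.h"

int main() {
    const float src[4] = {0.5f, -1.25f, 3.0f, 65504.0f}; // 65504 = max finite fp16
    lm_ggml_fp16_t half[4];
    float back[4];

    lm_ggml_cpu_fp32_to_fp16(src, half, 4);  // narrow 4 values to fp16
    lm_ggml_cpu_fp16_to_fp32(half, back, 4); // widen them back

    for (int i = 0; i < 4; ++i) {
        std::printf("%g -> %g\n", src[i], back[i]); // prints identical pairs
    }
    return 0;
}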
package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-impl.h
CHANGED
@@ -148,8 +148,14 @@ struct lm_ggml_map_custom2_op_params {

 struct lm_ggml_map_custom3_op_params {
     lm_ggml_custom3_op_t fun;
-    int
-    void
+    int n_tasks;
+    void * userdata;
+};
+
+struct lm_ggml_custom_op_params {
+    lm_ggml_custom_op_t fun;
+    int n_tasks;
+    void * userdata;
 };

 // bitset
@@ -311,29 +317,28 @@ LM_GGML_API void lm_ggml_aligned_free(void * ptr, size_t size);

 // FP16 to FP32 conversion

-
-
-
-
-
-
-
-
-#if defined(__ARM_NEON) && !defined(_MSC_VER) && !(defined(__CUDACC__) && __CUDACC_VER_MAJOR__ <= 11)
+// 16-bit float
+// on Arm, we use __fp16
+// on x86, we use uint16_t
+//
+// for old CUDA compilers (<= 11), we use uint16_t: ref https://github.com/ggml-org/llama.cpp/pull/10616
+// for MUSA compilers , we use uint16_t: ref https://github.com/ggml-org/llama.cpp/pull/11843
+//
+#if defined(__ARM_NEON) && !(defined(__CUDACC__) && __CUDACC_VER_MAJOR__ <= 11) && !defined(__MUSACC__)
 #define LM_GGML_COMPUTE_FP16_TO_FP32(x) lm_ggml_compute_fp16_to_fp32(x)
 #define LM_GGML_COMPUTE_FP32_TO_FP16(x) lm_ggml_compute_fp32_to_fp16(x)

 #define LM_GGML_FP16_TO_FP32(x) lm_ggml_compute_fp16_to_fp32(x)

 static inline float lm_ggml_compute_fp16_to_fp32(lm_ggml_fp16_t h) {
-
+    __fp16 tmp;
     memcpy(&tmp, &h, sizeof(lm_ggml_fp16_t));
     return (float)tmp;
 }

 static inline lm_ggml_fp16_t lm_ggml_compute_fp32_to_fp16(float f) {
     lm_ggml_fp16_t res;
-
+    __fp16 tmp = f;
     memcpy(&res, &tmp, sizeof(lm_ggml_fp16_t));
     return res;
 }
@@ -357,8 +362,8 @@ LM_GGML_API void lm_ggml_aligned_free(void * ptr, size_t size);
 #define LM_GGML_FP32_TO_FP16(x) LM_GGML_COMPUTE_FP32_TO_FP16(x)

 static inline float lm_ggml_compute_fp16_to_fp32(lm_ggml_fp16_t h) {
-
-
+    float f;
+    double d;
     __asm__(
         "mtfprd %0,%2\n"
         "xscvhpdp %0,%0\n"
@@ -370,8 +375,8 @@ LM_GGML_API void lm_ggml_aligned_free(void * ptr, size_t size);
 }

 static inline lm_ggml_fp16_t lm_ggml_compute_fp32_to_fp16(float f) {
-
-
+    double d;
+    lm_ggml_fp16_t r;
     __asm__( /* xscvdphp can work on double or single precision */
         "xscvdphp %0,%2\n"
         "mffprd %1,%0\n" :
@@ -381,6 +386,35 @@ LM_GGML_API void lm_ggml_aligned_free(void * ptr, size_t size);
     return r;
 }

+#elif defined(__riscv) && defined(LM_GGML_RV_ZFH)
+
+static inline float lm_ggml_compute_fp16_to_fp32(lm_ggml_fp16_t h) {
+    float f;
+    __asm__(
+        "fmv.h.x %[f], %[h]\n\t"
+        "fcvt.s.h %[f], %[f]"
+        : [f] "=&f" (f)
+        : [h] "r" (h)
+    );
+    return f;
+}
+
+static inline lm_ggml_fp16_t lm_ggml_compute_fp32_to_fp16(float f) {
+    lm_ggml_fp16_t res;
+    __asm__(
+        "fcvt.h.s %[f], %[f]\n\t"
+        "fmv.x.h %[h], %[f]"
+        : [h] "=&r" (res)
+        : [f] "f" (f)
+    );
+    return res;
+}
+
+#define LM_GGML_COMPUTE_FP16_TO_FP32(x) lm_ggml_compute_fp16_to_fp32(x)
+#define LM_GGML_COMPUTE_FP32_TO_FP16(x) lm_ggml_compute_fp32_to_fp16(x)
+#define LM_GGML_FP16_TO_FP32(x) LM_GGML_COMPUTE_FP16_TO_FP32(x)
+#define LM_GGML_FP32_TO_FP16(x) LM_GGML_COMPUTE_FP32_TO_FP16(x)
+
 #else

 // FP16 <-> FP32
@@ -456,7 +490,7 @@ LM_GGML_API void lm_ggml_aligned_free(void * ptr, size_t size);
 #define LM_GGML_COMPUTE_FP16_TO_FP32(x) lm_ggml_compute_fp16_to_fp32(x)
 #define LM_GGML_COMPUTE_FP32_TO_FP16(x) lm_ggml_compute_fp32_to_fp16(x)

-#endif // defined(__ARM_NEON) && (!defined(
+#endif // defined(__ARM_NEON) && !(defined(__CUDACC__) && __CUDACC_VER_MAJOR__ <= 11) && !defined(__MUSACC__)

 // precomputed f32 table for f16 (256 KB)
 // defined in ggml.c, initialized in lm_ggml_init()
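Beyond the new lm_ggml_custom_op_params struct, the bulk of this diff adds a RISC-V Zfh branch to the per-target FP16 conversion paths (NEON __fp16, POWER9 xscvhpdp/xscvdphp, now fcvt.s.h/fcvt.h.s). Calling code stays target-neutral by going through the macro pair every branch defines, as in this hedged sketch:

#include "ggml-impl.h"

// dot product over fp16 storage; the widening instruction used per element
// is whichever one the target-specific branch of ggml-impl.h selected
static float dot_fp16(const lm_ggml_fp16_t * a, const lm_ggml_fp16_t * b, int n) {
    float acc = 0.0f;
    for (int i = 0; i < n; ++i) {
        acc += LM_GGML_COMPUTE_FP16_TO_FP32(a[i]) * LM_GGML_COMPUTE_FP16_TO_FP32(b[i]);
    }
    return acc;
}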
package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-metal-impl.h
CHANGED
@@ -1,6 +1,70 @@
 #ifndef LM_GGML_METAL_IMPL
 #define LM_GGML_METAL_IMPL

+// kernel parameters for mat-vec threadgroups
+//
+// N_R0: number of src0 rows to process per simdgroup
+// N_SG: number of simdgroups per threadgroup
+//
+// TODO: for optimal performance, become function of the device and work size
+
+#define N_R0_Q4_0 4
+#define N_SG_Q4_0 2
+
+#define N_R0_Q4_1 4
+#define N_SG_Q4_1 2
+
+#define N_R0_Q5_0 4
+#define N_SG_Q5_0 2
+
+#define N_R0_Q5_1 4
+#define N_SG_Q5_1 2
+
+#define N_R0_Q8_0 4
+#define N_SG_Q8_0 2
+
+#define N_R0_Q2_K 4
+#define N_SG_Q2_K 2
+
+#define N_R0_Q3_K 2
+#define N_SG_Q3_K 2
+
+#define N_R0_Q4_K 4
+#define N_SG_Q4_K 2
+
+#define N_R0_Q5_K 2
+#define N_SG_Q5_K 2
+
+#define N_R0_Q6_K 1
+#define N_SG_Q6_K 2
+
+#define N_R0_IQ1_S 4
+#define N_SG_IQ1_S 2
+
+#define N_R0_IQ1_M 4
+#define N_SG_IQ1_M 2
+
+#define N_R0_IQ2_XXS 4
+#define N_SG_IQ2_XXS 2
+
+#define N_R0_IQ2_XS 4
+#define N_SG_IQ2_XS 2
+
+#define N_R0_IQ2_S 4
+#define N_SG_IQ2_S 2
+
+#define N_R0_IQ3_XXS 4
+#define N_SG_IQ3_XXS 2
+
+#define N_R0_IQ3_S 4
+#define N_SG_IQ3_S 2
+
+#define N_R0_IQ4_NL 2
+#define N_SG_IQ4_NL 2
+
+#define N_R0_IQ4_XS 2
+#define N_SG_IQ4_XS 2
+
 // kernel argument structs
 //
 // - element counters (e.g. ne00) typically use int32_t to reduce register usage
@@ -143,6 +207,10 @@ typedef struct {
     float attn_factor;
     float beta_fast;
     float beta_slow;
+    int32_t sect_0;
+    int32_t sect_1;
+    int32_t sect_2;
+    int32_t sect_3;
 } lm_ggml_metal_kargs_rope;

 typedef struct {
@@ -155,9 +223,12 @@ typedef struct {
     int32_t ne11;
     int32_t ne_12_2; // assume K and V are same shape
     int32_t ne_12_3;
-    uint64_t
-    uint64_t
-    uint64_t
+    uint64_t nb11;
+    uint64_t nb12;
+    uint64_t nb13;
+    uint64_t nb21;
+    uint64_t nb22;
+    uint64_t nb23;
     uint64_t nb31;
     int32_t ne1;
     int32_t ne2;
@@ -232,21 +303,42 @@ typedef struct {
 } lm_ggml_metal_kargs_mul_mv_ext;

 typedef struct {
-    int32_t
-    int32_t
-    uint64_t
+    int32_t ne10;
+    int32_t ne11; // n_expert_used (bcast)
+    uint64_t nb11;
+    uint64_t nb12;
+    int32_t neh11; // n_tokens
+    uint64_t nbh11;
+    int32_t ne20; // n_expert_used
+    uint64_t nb21;
+} lm_ggml_metal_kargs_mul_mm_id_map0;
+
+typedef struct {
+    int32_t ne20; // n_expert_used
+    int32_t neh0;
+    int32_t neh1;
+    uint64_t nbh1;
+    uint64_t nbh2;
+    int32_t ne0;
+    uint64_t nb1;
+    uint64_t nb2;
+} lm_ggml_metal_kargs_mul_mm_id_map1;
+
+typedef struct {
     int32_t ne00;
     int32_t ne02;
     uint64_t nb01;
     uint64_t nb02;
-
-    int32_t
-
-    uint64_t
-    uint64_t
-    uint64_t
-    int32_t
-    int32_t
+    uint64_t nb03;
+    int32_t neh12;
+    uint64_t nbh10;
+    uint64_t nbh11;
+    uint64_t nbh12;
+    uint64_t nbh13;
+    int32_t neh0;
+    int32_t neh1;
+    int16_t r2;
+    int16_t r3;
 } lm_ggml_metal_kargs_mul_mm_id;

 typedef struct {
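Per the new comment block, each mat-vec threadgroup covers N_R0 * N_SG rows of src0, so the host side needs ceil(rows / (N_R0 * N_SG)) threadgroups along that axis. A sketch of that arithmetic for the Q4_0 constants; this is host-side illustration only, the real Metal encoder takes more inputs:

#include <cstdint>

constexpr int N_R0_Q4_0 = 4; // src0 rows per simdgroup
constexpr int N_SG_Q4_0 = 2; // simdgroups per threadgroup

// threadgroups needed along the row axis for a Q4_0 mat-vec
inline int64_t q4_0_threadgroups(int64_t ne01 /* rows of src0 */) {
    const int rows_per_tg = N_R0_Q4_0 * N_SG_Q4_0;  // 8 rows per threadgroup
    return (ne01 + rows_per_tg - 1) / rows_per_tg;  // ceil division
}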
package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-opt.h
CHANGED
@@ -37,13 +37,16 @@ extern "C" {
     // ====== Dataset ======

     LM_GGML_API lm_ggml_opt_dataset_t lm_ggml_opt_dataset_init(
-
-
-            int64_t
-            int64_t
+            enum lm_ggml_type type_data, // the type for the internal data tensor
+            enum lm_ggml_type type_label, // the type for the internal labels tensor
+            int64_t ne_datapoint, // number of elements per datapoint
+            int64_t ne_label, // number of elements per label
+            int64_t ndata, // total number of datapoints/labels
+            int64_t ndata_shard); // number of datapoints/labels per shard (unit at which the dataset is shuffled/copied)
     LM_GGML_API void lm_ggml_opt_dataset_free(lm_ggml_opt_dataset_t dataset);

     // get underlying tensors that store the data
+    LM_GGML_API int64_t lm_ggml_opt_dataset_ndata (lm_ggml_opt_dataset_t dataset);
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_opt_dataset_data (lm_ggml_opt_dataset_t dataset); // shape = [ne_datapoint, ndata]
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_opt_dataset_labels(lm_ggml_opt_dataset_t dataset); // shape = [nd_label, ndata]

@@ -56,13 +59,19 @@ extern "C" {
             struct lm_ggml_tensor * data_batch, // shape = [ne_datapoint, ndata_batch]
             struct lm_ggml_tensor * labels_batch, // shape = [ne_label, ndata_batch]
             int64_t ibatch);
+    LM_GGML_API void lm_ggml_opt_dataset_get_batch_host(
+            lm_ggml_opt_dataset_t dataset,
+            void * data_batch,
+            size_t nb_data_batch,
+            void * labels_batch,
+            int64_t ibatch);

     // ====== Model / Context ======

     enum lm_ggml_opt_build_type {
-        LM_GGML_OPT_BUILD_TYPE_FORWARD,
-        LM_GGML_OPT_BUILD_TYPE_GRAD,
-        LM_GGML_OPT_BUILD_TYPE_OPT,
+        LM_GGML_OPT_BUILD_TYPE_FORWARD = 10,
+        LM_GGML_OPT_BUILD_TYPE_GRAD = 20,
+        LM_GGML_OPT_BUILD_TYPE_OPT = 30,
     };

     // parameters that control which optimizer is used and how said optimizer tries to find the minimal loss
@@ -81,20 +90,22 @@ extern "C" {
     // userdata can be used to pass arbitrary data
     typedef struct lm_ggml_opt_optimizer_params (*lm_ggml_opt_get_optimizer_params)(void * userdata);

-    // returns the default optimizer params (constant)
+    // returns the default optimizer params (constant, hard-coded values)
     // userdata is not used
     LM_GGML_API struct lm_ggml_opt_optimizer_params lm_ggml_opt_get_default_optimizer_params(void * userdata);

+    // casts userdata to lm_ggml_opt_optimizer_params and returns it
+    LM_GGML_API struct lm_ggml_opt_optimizer_params lm_ggml_opt_get_constant_optimizer_params(void * userdata);
+
     // parameters for initializing a new optimization context
     struct lm_ggml_opt_params {
         lm_ggml_backend_sched_t backend_sched; // defines which backends are used to construct the compute graphs

-
-
-
-
-        struct lm_ggml_tensor
-        struct lm_ggml_tensor * outputs;
+        // by default the forward graph needs to be reconstructed for each eval
+        // if ctx_compute, inputs, and outputs are set the graphs are instead allocated statically
+        struct lm_ggml_context * ctx_compute;
+        struct lm_ggml_tensor * inputs;
+        struct lm_ggml_tensor * outputs;

         enum lm_ggml_opt_loss_type loss_type;
         enum lm_ggml_opt_build_type build_type;
@@ -107,12 +118,9 @@ extern "C" {

     // get parameters for an optimization context with defaults set where possible
     // parameters for which no sensible defaults exist are supplied as arguments to this function
-    LM_GGML_API lm_ggml_opt_params lm_ggml_opt_default_params(
-            lm_ggml_backend_sched_t
-
-            struct lm_ggml_tensor * inputs,
-            struct lm_ggml_tensor * outputs,
-            enum lm_ggml_opt_loss_type loss_type);
+    LM_GGML_API struct lm_ggml_opt_params lm_ggml_opt_default_params(
+            lm_ggml_backend_sched_t backend_sched,
+            enum lm_ggml_opt_loss_type loss_type);

     LM_GGML_API lm_ggml_opt_context_t lm_ggml_opt_init(struct lm_ggml_opt_params params);
     LM_GGML_API void lm_ggml_opt_free(lm_ggml_opt_context_t opt_ctx);
@@ -120,7 +128,10 @@ extern "C" {
     // set gradients to zero, initilize loss, and optionally reset the optimizer
     LM_GGML_API void lm_ggml_opt_reset(lm_ggml_opt_context_t opt_ctx, bool optimizer);

+    LM_GGML_API bool lm_ggml_opt_static_graphs(lm_ggml_opt_context_t opt_ctx); // whether the graphs are allocated_statically
+
     // get underlying tensors that store data
+    // if not using static graphs these pointers become invalid with the next call to lm_ggml_opt_alloc
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_opt_inputs( lm_ggml_opt_context_t opt_ctx); // forward graph input tensor
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_opt_outputs( lm_ggml_opt_context_t opt_ctx); // forward graph output tensor
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_opt_labels( lm_ggml_opt_context_t opt_ctx); // labels to compare outputs against
@@ -128,11 +139,12 @@ extern "C" {
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_opt_pred( lm_ggml_opt_context_t opt_ctx); // predictions made by outputs
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_opt_ncorrect(lm_ggml_opt_context_t opt_ctx); // number of matching predictions between outputs and labels

+    // get the gradient accumulator for a node from the forward graph
     LM_GGML_API struct lm_ggml_tensor * lm_ggml_opt_grad_acc(lm_ggml_opt_context_t opt_ctx, struct lm_ggml_tensor * node);

     // ====== Optimization Result ======

-    LM_GGML_API lm_ggml_opt_result_t lm_ggml_opt_result_init();
+    LM_GGML_API lm_ggml_opt_result_t lm_ggml_opt_result_init(void);
     LM_GGML_API void lm_ggml_opt_result_free(lm_ggml_opt_result_t result);
     LM_GGML_API void lm_ggml_opt_result_reset(lm_ggml_opt_result_t result);

@@ -144,11 +156,20 @@ extern "C" {

     // ====== Computation ======

-    //
-    LM_GGML_API void
+    // if not using static graphs, this function must be called prior to lm_ggml_opt_alloc
+    LM_GGML_API void lm_ggml_opt_prepare_alloc(
+            lm_ggml_opt_context_t opt_ctx,
+            struct lm_ggml_context * ctx_compute,
+            struct lm_ggml_cgraph * gf,
+            struct lm_ggml_tensor * inputs,
+            struct lm_ggml_tensor * outputs);

+    // allocate the next graph for evaluation, either forward or forward + backward
+    // must be called exactly once prior to calling lm_ggml_opt_eval
+    LM_GGML_API void lm_ggml_opt_alloc(lm_ggml_opt_context_t opt_ctx, bool backward);

-    // do forward pass, increment result if not NULL, do backward pass
-    LM_GGML_API void
+    // do forward pass, increment result if not NULL, do backward pass if allocated
+    LM_GGML_API void lm_ggml_opt_eval(lm_ggml_opt_context_t opt_ctx, lm_ggml_opt_result_t result);

     // ############################################################################
     // ## The high-level functions start here. They do not depend on any private ##
@@ -200,9 +221,9 @@ extern "C" {
     // fit model defined by inputs and outputs to dataset
     LM_GGML_API void lm_ggml_opt_fit(
             lm_ggml_backend_sched_t backend_sched, // backend scheduler for constructing the compute graphs
-            lm_ggml_context
-            lm_ggml_tensor
-            lm_ggml_tensor
+            struct lm_ggml_context * ctx_compute, // context with temporarily allocated tensors to calculate the outputs
+            struct lm_ggml_tensor * inputs, // input tensor with shape [ne_datapoint, ndata_batch]
+            struct lm_ggml_tensor * outputs, // output tensor, must have shape [ne_label, ndata_batch] if labels are used
             lm_ggml_opt_dataset_t dataset, // dataset with data and optionally also labels
             enum lm_ggml_opt_loss_type loss_type, // loss to minimize
             lm_ggml_opt_get_optimizer_params get_opt_pars, // callback to get optimizer params, userdata is pointer to epoch (of type int64_t)
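The diff is cut off above, but the visible declarations already show the reworked low-level flow: when graphs are not static, each step announces a freshly built forward graph with lm_ggml_opt_prepare_alloc, then calls lm_ggml_opt_alloc exactly once, then lm_ggml_opt_eval. A hedged sketch of one training step under that assumption; graph construction itself is out of scope here:

#include "ggml-opt.h"

// one dynamic-graph training step; the caller supplies the freshly built
// forward graph gf plus its context and input/output tensors
void train_step(lm_ggml_opt_context_t opt_ctx,
                struct lm_ggml_context * ctx_compute,
                struct lm_ggml_cgraph * gf,
                struct lm_ggml_tensor * inputs,
                struct lm_ggml_tensor * outputs,
                lm_ggml_opt_result_t result) {
    lm_ggml_opt_prepare_alloc(opt_ctx, ctx_compute, gf, inputs, outputs);
    lm_ggml_opt_alloc(opt_ctx, /*backward=*/true); // exactly once per eval
    // ... copy the current batch into lm_ggml_opt_inputs(opt_ctx) here ...
    lm_ggml_opt_eval(opt_ctx, result); // forward pass, then backward if allocated
}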