cui-llama.rn 1.6.1 → 1.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/android/src/main/CMakeLists.txt +6 -0
- package/android/src/main/java/com/rnllama/LlamaContext.java +38 -5
- package/android/src/main/java/com/rnllama/RNLlama.java +139 -4
- package/android/src/main/jni.cpp +153 -14
- package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
- package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
- package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
- package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +24 -4
- package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +22 -2
- package/cpp/chat.cpp +128 -106
- package/cpp/chat.h +2 -0
- package/cpp/common.cpp +41 -76
- package/cpp/common.h +23 -19
- package/cpp/ggml-backend.cpp +9 -5
- package/cpp/ggml-backend.h +4 -4
- package/cpp/ggml-cpu/ggml-cpu-aarch64.cpp +0 -2
- package/cpp/ggml-cpu/ggml-cpu-quants.c +306 -6
- package/cpp/ggml-cpu/ggml-cpu.c +5 -13
- package/cpp/ggml-cpu/ggml-cpu.cpp +29 -16
- package/cpp/ggml-cpu/ops.cpp +107 -13
- package/cpp/ggml-cpu/vec.cpp +0 -6
- package/cpp/ggml-cpu/vec.h +16 -0
- package/cpp/ggml-llama-sim.metallib +0 -0
- package/cpp/ggml-llama.metallib +0 -0
- package/cpp/ggml-metal-impl.h +36 -11
- package/cpp/ggml-metal.m +321 -132
- package/cpp/ggml-opt.cpp +373 -190
- package/cpp/ggml-opt.h +49 -28
- package/cpp/ggml-quants.c +0 -6
- package/cpp/ggml.c +93 -38
- package/cpp/ggml.h +21 -7
- package/cpp/gguf.cpp +33 -33
- package/cpp/llama-adapter.cpp +6 -0
- package/cpp/llama-arch.cpp +3 -0
- package/cpp/llama-batch.cpp +3 -1
- package/cpp/llama-chat.cpp +8 -6
- package/cpp/llama-chat.h +1 -0
- package/cpp/llama-context.cpp +349 -135
- package/cpp/llama-context.h +30 -3
- package/cpp/llama-cparams.h +1 -0
- package/cpp/llama-graph.cpp +150 -234
- package/cpp/llama-graph.h +52 -7
- package/cpp/llama-hparams.cpp +17 -1
- package/cpp/llama-hparams.h +34 -5
- package/cpp/llama-kv-cache.cpp +662 -321
- package/cpp/llama-kv-cache.h +203 -93
- package/cpp/llama-memory.h +3 -2
- package/cpp/llama-model-loader.cpp +24 -15
- package/cpp/llama-model-saver.cpp +281 -0
- package/cpp/llama-model-saver.h +37 -0
- package/cpp/llama-model.cpp +536 -132
- package/cpp/llama-model.h +7 -1
- package/cpp/llama-sampling.cpp +18 -6
- package/cpp/llama-vocab.cpp +46 -8
- package/cpp/llama-vocab.h +6 -0
- package/cpp/llama.cpp +14 -0
- package/cpp/llama.h +72 -131
- package/cpp/minja/chat-template.hpp +9 -5
- package/cpp/minja/minja.hpp +69 -36
- package/cpp/rn-llama.cpp +611 -47
- package/cpp/rn-llama.h +33 -3
- package/cpp/sampling.cpp +57 -50
- package/cpp/tools/mtmd/clip-impl.h +462 -0
- package/cpp/tools/mtmd/clip.cpp +4024 -0
- package/cpp/tools/mtmd/clip.h +101 -0
- package/cpp/tools/mtmd/miniaudio.h +93468 -0
- package/cpp/tools/mtmd/mtmd-audio.cpp +855 -0
- package/cpp/tools/mtmd/mtmd-audio.h +62 -0
- package/cpp/tools/mtmd/mtmd-helper.cpp +297 -0
- package/cpp/tools/mtmd/mtmd.cpp +942 -0
- package/cpp/tools/mtmd/mtmd.h +362 -0
- package/cpp/tools/mtmd/stb_image.h +7988 -0
- package/ios/CMakeLists.txt +7 -0
- package/ios/RNLlama.mm +77 -3
- package/ios/RNLlamaContext.h +5 -1
- package/ios/RNLlamaContext.mm +105 -10
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/chat.h +2 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/common.h +23 -19
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-backend.h +4 -4
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-opt.h +49 -28
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml.h +21 -7
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-chat.h +1 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-context.h +30 -3
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-cparams.h +1 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-graph.h +52 -7
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-hparams.h +34 -5
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-kv-cache.h +203 -93
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-memory.h +3 -2
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model-saver.h +37 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model.h +7 -1
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-vocab.h +6 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama.h +72 -131
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/minja.hpp +69 -36
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/rn-llama.h +33 -3
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Info.plist +0 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/ggml-llama.metallib +0 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/rnllama +0 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +2 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +23 -19
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +4 -4
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +49 -28
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +21 -7
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +1 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +30 -3
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +1 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +52 -7
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +34 -5
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +203 -93
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +3 -2
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-saver.h +37 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +7 -1
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +6 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +72 -131
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +69 -36
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +33 -3
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +1 -1
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/chat.h +2 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/common.h +23 -19
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-backend.h +4 -4
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-opt.h +49 -28
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml.h +21 -7
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-chat.h +1 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-context.h +30 -3
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-cparams.h +1 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-graph.h +52 -7
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-hparams.h +34 -5
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-kv-cache.h +203 -93
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-memory.h +3 -2
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model-saver.h +37 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model.h +7 -1
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-vocab.h +6 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama.h +72 -131
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/minja.hpp +69 -36
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/rn-llama.h +33 -3
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Info.plist +0 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/ggml-llama.metallib +0 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/rnllama +0 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +2 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +23 -19
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +4 -4
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +49 -28
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +21 -7
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +1 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +30 -3
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +1 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +52 -7
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +34 -5
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +203 -93
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +3 -2
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-saver.h +37 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +7 -1
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +6 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +72 -131
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +69 -36
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +33 -3
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +1 -1
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
- package/jest/mock.js +33 -7
- package/lib/commonjs/NativeRNLlama.js.map +1 -1
- package/lib/commonjs/index.js +153 -21
- package/lib/commonjs/index.js.map +1 -1
- package/lib/module/NativeRNLlama.js.map +1 -1
- package/lib/module/index.js +152 -20
- package/lib/module/index.js.map +1 -1
- package/lib/typescript/NativeRNLlama.d.ts +50 -4
- package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
- package/lib/typescript/index.d.ts +72 -6
- package/lib/typescript/index.d.ts.map +1 -1
- package/package.json +1 -1
- package/src/NativeRNLlama.ts +67 -4
- package/src/index.ts +212 -38
- package/lib/commonjs/chat.js +0 -37
- package/lib/commonjs/chat.js.map +0 -1
- package/lib/module/chat.js +0 -33
- package/lib/module/chat.js.map +0 -1
- package/lib/typescript/chat.d.ts +0 -10
- package/lib/typescript/chat.d.ts.map +0 -1
- package/src/chat.ts +0 -44
@@ -0,0 +1,462 @@
|
|
1
|
+
#include "ggml.h"
|
2
|
+
#include "gguf.h"
|
3
|
+
#include "clip.h"
|
4
|
+
|
5
|
+
#include <climits>
|
6
|
+
#include <cstdarg>
|
7
|
+
#include <cinttypes>
|
8
|
+
#include <string>
|
9
|
+
#include <map>
|
10
|
+
#include <sstream>
|
11
|
+
#include <vector>
|
12
|
+
#include <memory>
|
13
|
+
|
14
|
+
// Internal header for clip.cpp

#define KEY_FTYPE               "general.file_type"
#define KEY_NAME                "general.name"
#define KEY_DESCRIPTION         "general.description"
#define KEY_PROJ_TYPE           "clip.projector_type"
#define KEY_HAS_AUDIO_ENC       "clip.has_audio_encoder"
#define KEY_HAS_VISION_ENC      "clip.has_vision_encoder"
#define KEY_USE_GELU            "clip.use_gelu"
#define KEY_USE_SILU            "clip.use_silu"

// per-modality keys; "%s" is filled with the modality name ("vision"/"audio")
#define KEY_N_EMBD              "clip.%s.embedding_length"
#define KEY_N_FF                "clip.%s.feed_forward_length"
#define KEY_N_BLOCK             "clip.%s.block_count"
#define KEY_PROJ_DIM            "clip.%s.projection_dim"
#define KEY_N_HEAD              "clip.%s.attention.head_count"
#define KEY_LAYER_NORM_EPS      "clip.%s.attention.layer_norm_epsilon"

// vision-specific
#define KEY_IMAGE_SIZE          "clip.vision.image_size"
#define KEY_PATCH_SIZE          "clip.vision.patch_size"
#define KEY_IMAGE_MEAN          "clip.vision.image_mean"
#define KEY_IMAGE_STD           "clip.vision.image_std"
#define KEY_FEATURE_LAYER       "clip.vision.feature_layer"
#define KEY_PROJ_SCALE_FACTOR   "clip.vision.projector.scale_factor"
#define KEY_SPATIAL_MERGE_SIZE  "clip.vision.spatial_merge_size"

#define KEY_MM_PATCH_MERGE_TYPE   "clip.vision.mm_patch_merge_type"
#define KEY_IMAGE_GRID_PINPOINTS  "clip.vision.image_grid_pinpoints"
#define KEY_IMAGE_CROP_RESOLUTION "clip.vision.image_crop_resolution"
#define KEY_WIN_ATTN_PATTERN      "clip.vision.n_wa_pattern"
#define KEY_ATTN_WINDOW_SIZE      "clip.vision.window_size"
#define KEY_MINICPMV_VERSION      "clip.minicpmv_version"

// audio-specific
#define KEY_A_NUM_MEL_BINS      "clip.audio.num_mel_bins"
#define KEY_A_PROJ_STACK_FACTOR "clip.audio.projector.stack_factor"


//
// tensor name constants
//

#define TN_POS_EMBD        "%s.position_embd.weight"
#define TN_CLASS_EMBD      "v.class_embd"
#define TN_PATCH_EMBD      "v.patch_embd.weight"  // not renamed with ".0" postfix for backward compat
#define TN_PATCH_EMBD_1    "v.patch_embd.weight.1"
#define TN_PATCH_BIAS      "v.patch_embd.bias"
#define TN_ATTN_K          "%s.blk.%d.attn_k.%s"
#define TN_ATTN_Q          "%s.blk.%d.attn_q.%s"
#define TN_ATTN_V          "%s.blk.%d.attn_v.%s"
#define TN_ATTN_OUTPUT     "%s.blk.%d.attn_out.%s"
#define TN_ATTN_K_NORM     "%s.blk.%d.attn_k_norm.%s"
#define TN_ATTN_Q_NORM     "%s.blk.%d.attn_q_norm.%s"
#define TN_FFN_DOWN        "%s.blk.%d.ffn_down.%s"
#define TN_FFN_GATE        "%s.blk.%d.ffn_gate.%s"
#define TN_FFN_UP          "%s.blk.%d.ffn_up.%s"
// (duplicate TN_FFN_GATE definition removed)
#define TN_LN_1            "%s.blk.%d.ln1.%s"  // layer norm
#define TN_LN_2            "%s.blk.%d.ln2.%s"  // layer norm
#define TN_LS_1            "%s.blk.%d.ls1.%s"  // layer scale
#define TN_LS_2            "%s.blk.%d.ls2.%s"  // layer scale
#define TN_LN_PRE          "%s.pre_ln.%s"
#define TN_LN_POST         "%s.post_ln.%s"
#define TN_LLAVA_PROJ      "mm.%d.%s"
#define TN_MVLM_PROJ_MLP   "mm.model.mlp.%d.%s"
#define TN_MVLM_PROJ_BLOCK "mm.model.mb_block.%d.block.%d.%s"
#define TN_MVLM_PROJ_PEG   "mm.model.peg.%d.%s"
#define TN_IMAGE_NEWLINE   "model.image_newline"
#define TN_MM_INP_NORM     "mm.input_norm.weight"
#define TN_MM_INP_PROJ     "mm.input_projection.weight"  // gemma3
#define TN_MM_SOFT_EMB_N   "mm.soft_emb_norm.weight"     // gemma3
#define TN_MM_PROJECTOR    "mm.model.fc.weight"          // idefics3
#define TN_MM_PATCH_MERGER "mm.patch_merger.weight"      // mistral small 3.1
#define TN_TOK_IMG_BREAK   "v.token_embd.img_break"      // pixtral
#define TN_TOK_GLM_BOI     "adapter.boi"  // glm-edge (these embeddings are not in text model)
#define TN_TOK_GLM_EOI     "adapter.eoi"  // glm-edge (these embeddings are not in text model)

// minicpmv
#define TN_MINICPMV_POS_EMBD_K "resampler.pos_embed_k"
#define TN_MINICPMV_QUERY      "resampler.query"
#define TN_MINICPMV_PROJ       "resampler.proj.weight"
#define TN_MINICPMV_KV_PROJ    "resampler.kv.weight"
#define TN_MINICPMV_ATTN       "resampler.attn.%s.%s"
#define TN_MINICPMV_LN         "resampler.ln_%s.%s"

// NOTE: "ADAPER" (sic) — misspelled macro name kept as-is for source compatibility
#define TN_GLM_ADAPER_CONV      "adapter.conv.%s"
#define TN_GLM_ADAPTER_LINEAR   "adapter.linear.linear.%s"
#define TN_GLM_ADAPTER_NORM_1   "adapter.linear.norm1.%s"
#define TN_GLM_ADAPTER_D_H_2_4H "adapter.linear.dense_h_to_4h.%s"
#define TN_GLM_ADAPTER_GATE     "adapter.linear.gate.%s"
#define TN_GLM_ADAPTER_D_4H_2_H "adapter.linear.dense_4h_to_h.%s"

// ultravox
#define TN_CONV1D       "a.conv1d.%d.%s"
#define TN_MM_AUDIO_MLP "mm.a.mlp.%d.%s"
#define TN_MM_NORM_PRE  "mm.a.norm_pre.%s"
#define TN_MM_NORM_MID  "mm.a.norm_mid.%s"

// align x to upper multiple of n
#define CLIP_ALIGN(x, n) ((((x) + (n) - 1) / (n)) * (n))
|
115
|
+
|
116
|
+
// Known multimodal projector architectures.
enum projector_type {
    PROJECTOR_TYPE_MLP,
    PROJECTOR_TYPE_MLP_NORM,
    PROJECTOR_TYPE_LDP,
    PROJECTOR_TYPE_LDPV2,
    PROJECTOR_TYPE_MINICPMV,
    PROJECTOR_TYPE_GLM_EDGE,
    PROJECTOR_TYPE_QWEN2VL,
    PROJECTOR_TYPE_GEMMA3,
    PROJECTOR_TYPE_IDEFICS3,
    PROJECTOR_TYPE_PIXTRAL,
    PROJECTOR_TYPE_QWEN25VL,
    PROJECTOR_TYPE_ULTRAVOX,
    PROJECTOR_TYPE_INTERNVL,
    PROJECTOR_TYPE_LLAMA4,
    PROJECTOR_TYPE_UNKNOWN,
};

// Mapping from projector type to the string stored under KEY_PROJ_TYPE in GGUF.
// NOTE: PROJECTOR_TYPE_MLP_NORM intentionally has no entry here.
static std::map<projector_type, std::string> PROJECTOR_TYPE_NAMES = {
    { PROJECTOR_TYPE_MLP,       "mlp" },
    { PROJECTOR_TYPE_LDP,       "ldp" },
    { PROJECTOR_TYPE_LDPV2,     "ldpv2"},
    { PROJECTOR_TYPE_MINICPMV,  "resampler"},
    { PROJECTOR_TYPE_GLM_EDGE,  "adapter"},
    { PROJECTOR_TYPE_QWEN2VL,   "qwen2vl_merger"},
    { PROJECTOR_TYPE_QWEN25VL,  "qwen2.5vl_merger"},
    { PROJECTOR_TYPE_GEMMA3,    "gemma3"},
    { PROJECTOR_TYPE_IDEFICS3,  "idefics3"},
    { PROJECTOR_TYPE_PIXTRAL,   "pixtral"},
    { PROJECTOR_TYPE_ULTRAVOX,  "ultravox"},
    { PROJECTOR_TYPE_INTERNVL,  "internvl"},
    { PROJECTOR_TYPE_LLAMA4,    "llama4"},
};

// Reverse lookup: projector name string -> enum value.
// Returns PROJECTOR_TYPE_UNKNOWN when the name is not registered.
static projector_type clip_projector_type_from_string(const std::string & str) {
    for (auto it = PROJECTOR_TYPE_NAMES.begin(); it != PROJECTOR_TYPE_NAMES.end(); ++it) {
        if (it->second == str) {
            return it->first;
        }
    }
    return PROJECTOR_TYPE_UNKNOWN;
}
|
158
|
+
|
159
|
+
// RGB uint8 image
struct clip_image_u8 {
    int nx;
    int ny;

    std::vector<uint8_t> buf;  // RGBRGBRGB..., buf.size() == nx*ny*3
};

// For images, buf.size() == nx*ny*3
// Memory layout: RGBRGBRGB...
// For audio, only one channel is used, buf.size() == nx*ny
// nx will be n_frames and ny will be n_mel
struct clip_image_f32 {
    int nx;
    int ny;

    std::vector<float> buf;
};
|
177
|
+
|
178
|
+
//
|
179
|
+
// logging
|
180
|
+
//
|
181
|
+
|
182
|
+
static void clip_log_callback_default(enum lm_ggml_log_level level, const char * text, void * user_data) {
|
183
|
+
(void) level;
|
184
|
+
(void) user_data;
|
185
|
+
fputs(text, stderr);
|
186
|
+
fflush(stderr);
|
187
|
+
}
|
188
|
+
|
189
|
+
struct clip_logger_state {
|
190
|
+
lm_ggml_log_level verbosity_thold;
|
191
|
+
lm_ggml_log_callback log_callback;
|
192
|
+
void * log_callback_user_data;
|
193
|
+
};
|
194
|
+
|
195
|
+
extern struct clip_logger_state g_logger_state;
|
196
|
+
|
197
|
+
// Format a log message and hand it to the configured callback.
// Uses a small stack buffer for the common case and falls back to a heap
// allocation when the formatted message does not fit.
static void clip_log_internal_v(enum lm_ggml_log_level level, const char * format, va_list args) {
    if (format == NULL) {
        return;
    }
    va_list args_copy;
    va_copy(args_copy, args);
    char buffer[128];
    int len = vsnprintf(buffer, sizeof(buffer), format, args);
    if (len < 0) {
        // formatting error: the buffer contents are unspecified, so log nothing
        // (the original code would have forwarded a garbage buffer here)
        va_end(args_copy);
        return;
    }
    if (len < (int) sizeof(buffer)) {
        g_logger_state.log_callback(level, buffer, g_logger_state.log_callback_user_data);
    } else {
        // message truncated: re-format into an exactly-sized heap buffer
        char * buffer2 = (char *) calloc(len + 1, sizeof(char));
        if (buffer2 != NULL) {
            vsnprintf(buffer2, len + 1, format, args_copy);  // vsnprintf NUL-terminates
            g_logger_state.log_callback(level, buffer2, g_logger_state.log_callback_user_data);
            free(buffer2);
        }
    }
    va_end(args_copy);
}
|
216
|
+
|
217
|
+
// Variadic front-end for clip_log_internal_v.
static void clip_log_internal(enum lm_ggml_log_level level, const char * format, ...) {
    va_list args;
    va_start(args, format);
    clip_log_internal_v(level, format, args);
    va_end(args);
}

// Logging macros: messages below g_logger_state.verbosity_thold are dropped.
#define LOG_TMPL(level, ...) \
    do { \
        if ((level) >= g_logger_state.verbosity_thold) { \
            clip_log_internal((level), __VA_ARGS__); \
        } \
    } while (0)
#define LOG_INF(...) LOG_TMPL(LM_GGML_LOG_LEVEL_INFO,  __VA_ARGS__)
#define LOG_WRN(...) LOG_TMPL(LM_GGML_LOG_LEVEL_WARN,  __VA_ARGS__)
#define LOG_ERR(...) LOG_TMPL(LM_GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
#define LOG_DBG(...) LOG_TMPL(LM_GGML_LOG_LEVEL_DEBUG, __VA_ARGS__)
#define LOG_CNT(...) LOG_TMPL(LM_GGML_LOG_LEVEL_CONT,  __VA_ARGS__)
|
235
|
+
|
236
|
+
//
|
237
|
+
// cpp wrappers
|
238
|
+
//
|
239
|
+
|
240
|
+
// wrapper for clip_image_size
|
241
|
+
struct clip_image_size_deleter {
|
242
|
+
void operator()(clip_image_size * val) { clip_image_size_free(val); }
|
243
|
+
};
|
244
|
+
typedef std::unique_ptr<clip_image_size, clip_image_size_deleter> clip_image_size_ptr;
|
245
|
+
|
246
|
+
// wrapper for clip_image_u8
|
247
|
+
struct clip_image_u8_deleter {
|
248
|
+
void operator()(clip_image_u8 * val) { clip_image_u8_free(val); }
|
249
|
+
};
|
250
|
+
typedef std::unique_ptr<clip_image_u8, clip_image_u8_deleter> clip_image_u8_ptr;
|
251
|
+
|
252
|
+
// wrapper for clip_image_f32
|
253
|
+
struct clip_image_f32_deleter {
|
254
|
+
void operator()(clip_image_f32 * val) { clip_image_f32_free(val); }
|
255
|
+
};
|
256
|
+
typedef std::unique_ptr<clip_image_f32, clip_image_f32_deleter> clip_image_f32_ptr;
|
257
|
+
|
258
|
+
struct clip_image_u8_batch {
|
259
|
+
std::vector<clip_image_u8_ptr> entries;
|
260
|
+
};
|
261
|
+
|
262
|
+
struct clip_image_f32_batch {
|
263
|
+
std::vector<clip_image_f32_ptr> entries;
|
264
|
+
bool is_audio = false;
|
265
|
+
|
266
|
+
// for llava-uhd style models, we need to know the grid size
|
267
|
+
// note: entries.size() == grid_x * grid_y + 1 (one overview image)
|
268
|
+
int grid_x = 0;
|
269
|
+
int grid_y = 0;
|
270
|
+
|
271
|
+
clip_image_f32_batch clone() const {
|
272
|
+
clip_image_f32_batch new_batch{
|
273
|
+
/* entries */ {},
|
274
|
+
/* is_audio */ is_audio,
|
275
|
+
/* grid_x */ grid_x,
|
276
|
+
/* grid_y */ grid_y,
|
277
|
+
};
|
278
|
+
new_batch.entries.reserve(entries.size());
|
279
|
+
for (const auto & entry : entries) {
|
280
|
+
new_batch.entries.emplace_back(new clip_image_f32(*entry));
|
281
|
+
}
|
282
|
+
return new_batch;
|
283
|
+
}
|
284
|
+
};
|
285
|
+
|
286
|
+
//
|
287
|
+
// common utils
|
288
|
+
//
|
289
|
+
|
290
|
+
static std::string string_format(const char * fmt, ...) {
|
291
|
+
va_list ap;
|
292
|
+
va_list ap2;
|
293
|
+
va_start(ap, fmt);
|
294
|
+
va_copy(ap2, ap);
|
295
|
+
int size = vsnprintf(NULL, 0, fmt, ap);
|
296
|
+
LM_GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT
|
297
|
+
std::vector<char> buf(size + 1);
|
298
|
+
int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
|
299
|
+
LM_GGML_ASSERT(size2 == size);
|
300
|
+
va_end(ap2);
|
301
|
+
va_end(ap);
|
302
|
+
return std::string(buf.data(), buf.size());
|
303
|
+
}
|
304
|
+
|
305
|
+
// Replace every occurrence of `search` in `s` with `replace`, scanning left
// to right and skipping over freshly inserted text. No-op for empty `search`.
static void string_replace_all(std::string & s, const std::string & search, const std::string & replace) {
    if (search.empty()) {
        return;
    }
    size_t pos = 0;
    while ((pos = s.find(search, pos)) != std::string::npos) {
        s.replace(pos, search.length(), replace);
        pos += replace.length();  // continue after the replacement
    }
}
|
321
|
+
|
322
|
+
// split string by a `std::string delim` instead of `char delim`
// An empty input yields one empty token; N delimiters yield N+1 tokens.
static std::vector<std::string> string_split_str(std::string s, const std::string & delimiter) {
    std::vector<std::string> parts;
    size_t begin = 0;
    size_t end;
    while ((end = s.find(delimiter, begin)) != std::string::npos) {
        parts.push_back(s.substr(begin, end - begin));
        begin = end + delimiter.length();
    }
    parts.push_back(s.substr(begin));
    return parts;
}
|
335
|
+
|
336
|
+
//
|
337
|
+
// gguf utils
|
338
|
+
//
|
339
|
+
|
340
|
+
static std::string lm_gguf_data_to_str(enum lm_gguf_type type, const void * data, int i) {
|
341
|
+
switch (type) {
|
342
|
+
case LM_GGUF_TYPE_UINT8: return std::to_string(((const uint8_t *)data)[i]);
|
343
|
+
case LM_GGUF_TYPE_INT8: return std::to_string(((const int8_t *)data)[i]);
|
344
|
+
case LM_GGUF_TYPE_UINT16: return std::to_string(((const uint16_t *)data)[i]);
|
345
|
+
case LM_GGUF_TYPE_INT16: return std::to_string(((const int16_t *)data)[i]);
|
346
|
+
case LM_GGUF_TYPE_UINT32: return std::to_string(((const uint32_t *)data)[i]);
|
347
|
+
case LM_GGUF_TYPE_INT32: return std::to_string(((const int32_t *)data)[i]);
|
348
|
+
case LM_GGUF_TYPE_UINT64: return std::to_string(((const uint64_t *)data)[i]);
|
349
|
+
case LM_GGUF_TYPE_INT64: return std::to_string(((const int64_t *)data)[i]);
|
350
|
+
case LM_GGUF_TYPE_FLOAT32: return std::to_string(((const float *)data)[i]);
|
351
|
+
case LM_GGUF_TYPE_FLOAT64: return std::to_string(((const double *)data)[i]);
|
352
|
+
case LM_GGUF_TYPE_BOOL: return ((const bool *)data)[i] ? "true" : "false";
|
353
|
+
default: return string_format("unknown type %d", type);
|
354
|
+
}
|
355
|
+
}
|
356
|
+
|
357
|
+
static std::string lm_gguf_kv_to_str(const struct lm_gguf_context * ctx_gguf, int i) {
|
358
|
+
const enum lm_gguf_type type = lm_gguf_get_kv_type(ctx_gguf, i);
|
359
|
+
|
360
|
+
switch (type) {
|
361
|
+
case LM_GGUF_TYPE_STRING:
|
362
|
+
return lm_gguf_get_val_str(ctx_gguf, i);
|
363
|
+
case LM_GGUF_TYPE_ARRAY:
|
364
|
+
{
|
365
|
+
const enum lm_gguf_type arr_type = lm_gguf_get_arr_type(ctx_gguf, i);
|
366
|
+
int arr_n = lm_gguf_get_arr_n(ctx_gguf, i);
|
367
|
+
const void * data = arr_type == LM_GGUF_TYPE_STRING ? nullptr : lm_gguf_get_arr_data(ctx_gguf, i);
|
368
|
+
std::stringstream ss;
|
369
|
+
ss << "[";
|
370
|
+
for (int j = 0; j < arr_n; j++) {
|
371
|
+
if (arr_type == LM_GGUF_TYPE_STRING) {
|
372
|
+
std::string val = lm_gguf_get_arr_str(ctx_gguf, i, j);
|
373
|
+
// escape quotes
|
374
|
+
string_replace_all(val, "\\", "\\\\");
|
375
|
+
string_replace_all(val, "\"", "\\\"");
|
376
|
+
ss << '"' << val << '"';
|
377
|
+
} else if (arr_type == LM_GGUF_TYPE_ARRAY) {
|
378
|
+
ss << "???";
|
379
|
+
} else {
|
380
|
+
ss << lm_gguf_data_to_str(arr_type, data, j);
|
381
|
+
}
|
382
|
+
if (j < arr_n - 1) {
|
383
|
+
ss << ", ";
|
384
|
+
}
|
385
|
+
}
|
386
|
+
ss << "]";
|
387
|
+
return ss.str();
|
388
|
+
}
|
389
|
+
default:
|
390
|
+
return lm_gguf_data_to_str(type, lm_gguf_get_val_data(ctx_gguf, i), 0);
|
391
|
+
}
|
392
|
+
}
|
393
|
+
|
394
|
+
//
|
395
|
+
// debugging
|
396
|
+
//
|
397
|
+
|
398
|
+
static void print_tensor_shape(lm_ggml_tensor * t) {
|
399
|
+
printf("%s.shape = [", t->name);
|
400
|
+
for (int i = 0; i < lm_ggml_n_dims(t); ++i) {
|
401
|
+
printf("%" PRId64, t->ne[i]);
|
402
|
+
if (i < lm_ggml_n_dims(t) - 1) {
|
403
|
+
printf(", ");
|
404
|
+
}
|
405
|
+
}
|
406
|
+
printf("]\n");
|
407
|
+
}
|
408
|
+
|
409
|
+
// Print tensor contents to stdout, eliding the middle of any dimension longer
// than 2*n (shows the first n and last n entries with "..." in between).
// `data` is the raw tensor buffer; offsets are computed from t->nb strides.
static void print_tensor_data(lm_ggml_tensor * t, uint8_t * data, int64_t n) {
    lm_ggml_type type = t->type;
    int64_t * ne = t->ne;
    size_t  * nb = t->nb;
    for (int64_t i3 = 0; i3 < ne[3]; i3++) {
        printf("%s.data: [\n", t->name);
        for (int64_t i2 = 0; i2 < ne[2]; i2++) {
            if (i2 == n && ne[2] > 2*n) {
                printf("     ..., \n");
                i2 = ne[2] - n;  // jump to the trailing n slices
            }
            printf("     [\n");
            for (int64_t i1 = 0; i1 < ne[1]; i1++) {
                if (i1 == n && ne[1] > 2*n) {
                    printf("      ..., \n");
                    i1 = ne[1] - n;
                }
                printf("      [");
                for (int64_t i0 = 0; i0 < ne[0]; i0++) {
                    if (i0 == n && ne[0] > 2*n) {
                        printf("..., ");
                        i0 = ne[0] - n;
                    }
                    // byte offset of element (i0, i1, i2, i3)
                    size_t i = i3 * nb[3] + i2 * nb[2] + i1 * nb[1] + i0 * nb[0];
                    float v;
                    switch (type) {
                        case LM_GGML_TYPE_F16: v = lm_ggml_fp16_to_fp32(*(lm_ggml_fp16_t *) &data[i]); break;
                        case LM_GGML_TYPE_F32: v = *(float *) &data[i];            break;
                        case LM_GGML_TYPE_I32: v = (float) *(int32_t *) &data[i];  break;
                        case LM_GGML_TYPE_I16: v = (float) *(int16_t *) &data[i];  break;
                        case LM_GGML_TYPE_I8:  v = (float) *(int8_t  *) &data[i];  break;
                        default: LM_GGML_ABORT("fatal error");  // unsupported element type
                    }
                    printf("%8.4f", v);
                    if (i0 < ne[0] - 1) printf(", ");
                }
                printf("],\n");
            }
            printf("    ],\n");
        }
        printf("    ]\n");
    }
}
|
457
|
+
|
458
|
+
//
|
459
|
+
// API used internally with mtmd
|
460
|
+
//
|
461
|
+
|
462
|
+
projector_type clip_get_projector_type(const struct clip_ctx * ctx);
|