llama-cpp-capacitor 0.0.5 → 0.0.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (149)
  1. package/cpp/LICENSE +21 -0
  2. package/cpp/README.md +4 -0
  3. package/cpp/anyascii.c +22223 -0
  4. package/cpp/anyascii.h +42 -0
  5. package/cpp/chat-parser.cpp +393 -0
  6. package/cpp/chat-parser.h +120 -0
  7. package/cpp/chat.cpp +2315 -0
  8. package/cpp/chat.h +221 -0
  9. package/cpp/common.cpp +1619 -0
  10. package/cpp/common.h +744 -0
  11. package/cpp/ggml-alloc.c +1028 -0
  12. package/cpp/ggml-alloc.h +76 -0
  13. package/cpp/ggml-backend-impl.h +255 -0
  14. package/cpp/ggml-backend-reg.cpp +600 -0
  15. package/cpp/ggml-backend.cpp +2118 -0
  16. package/cpp/ggml-backend.h +354 -0
  17. package/cpp/ggml-common.h +1878 -0
  18. package/cpp/ggml-cpp.h +39 -0
  19. package/cpp/ggml-cpu/amx/amx.cpp +221 -0
  20. package/cpp/ggml-cpu/amx/amx.h +8 -0
  21. package/cpp/ggml-cpu/amx/common.h +91 -0
  22. package/cpp/ggml-cpu/amx/mmq.cpp +2512 -0
  23. package/cpp/ggml-cpu/amx/mmq.h +10 -0
  24. package/cpp/ggml-cpu/arch/arm/cpu-feats.cpp +94 -0
  25. package/cpp/ggml-cpu/arch/arm/quants.c +3650 -0
  26. package/cpp/ggml-cpu/arch/arm/repack.cpp +1891 -0
  27. package/cpp/ggml-cpu/arch/x86/cpu-feats.cpp +327 -0
  28. package/cpp/ggml-cpu/arch/x86/quants.c +3820 -0
  29. package/cpp/ggml-cpu/arch/x86/repack.cpp +6307 -0
  30. package/cpp/ggml-cpu/arch-fallback.h +215 -0
  31. package/cpp/ggml-cpu/binary-ops.cpp +158 -0
  32. package/cpp/ggml-cpu/binary-ops.h +16 -0
  33. package/cpp/ggml-cpu/common.h +73 -0
  34. package/cpp/ggml-cpu/ggml-cpu-impl.h +525 -0
  35. package/cpp/ggml-cpu/ggml-cpu.c +3578 -0
  36. package/cpp/ggml-cpu/ggml-cpu.cpp +672 -0
  37. package/cpp/ggml-cpu/ops.cpp +10587 -0
  38. package/cpp/ggml-cpu/ops.h +114 -0
  39. package/cpp/ggml-cpu/quants.c +1193 -0
  40. package/cpp/ggml-cpu/quants.h +97 -0
  41. package/cpp/ggml-cpu/repack.cpp +1982 -0
  42. package/cpp/ggml-cpu/repack.h +120 -0
  43. package/cpp/ggml-cpu/simd-mappings.h +1184 -0
  44. package/cpp/ggml-cpu/traits.cpp +36 -0
  45. package/cpp/ggml-cpu/traits.h +38 -0
  46. package/cpp/ggml-cpu/unary-ops.cpp +186 -0
  47. package/cpp/ggml-cpu/unary-ops.h +28 -0
  48. package/cpp/ggml-cpu/vec.cpp +348 -0
  49. package/cpp/ggml-cpu/vec.h +1121 -0
  50. package/cpp/ggml-cpu.h +145 -0
  51. package/cpp/ggml-impl.h +622 -0
  52. package/cpp/ggml-metal-impl.h +688 -0
  53. package/cpp/ggml-metal.h +66 -0
  54. package/cpp/ggml-metal.m +6833 -0
  55. package/cpp/ggml-opt.cpp +1093 -0
  56. package/cpp/ggml-opt.h +256 -0
  57. package/cpp/ggml-quants.c +5324 -0
  58. package/cpp/ggml-quants.h +106 -0
  59. package/cpp/ggml-threading.cpp +12 -0
  60. package/cpp/ggml-threading.h +14 -0
  61. package/cpp/ggml.c +7108 -0
  62. package/cpp/ggml.h +2492 -0
  63. package/cpp/gguf.cpp +1358 -0
  64. package/cpp/gguf.h +202 -0
  65. package/cpp/json-partial.cpp +256 -0
  66. package/cpp/json-partial.h +38 -0
  67. package/cpp/json-schema-to-grammar.cpp +985 -0
  68. package/cpp/json-schema-to-grammar.h +21 -0
  69. package/cpp/llama-adapter.cpp +388 -0
  70. package/cpp/llama-adapter.h +76 -0
  71. package/cpp/llama-arch.cpp +2355 -0
  72. package/cpp/llama-arch.h +499 -0
  73. package/cpp/llama-batch.cpp +875 -0
  74. package/cpp/llama-batch.h +160 -0
  75. package/cpp/llama-chat.cpp +783 -0
  76. package/cpp/llama-chat.h +65 -0
  77. package/cpp/llama-context.cpp +2748 -0
  78. package/cpp/llama-context.h +306 -0
  79. package/cpp/llama-cparams.cpp +5 -0
  80. package/cpp/llama-cparams.h +41 -0
  81. package/cpp/llama-cpp.h +30 -0
  82. package/cpp/llama-grammar.cpp +1229 -0
  83. package/cpp/llama-grammar.h +173 -0
  84. package/cpp/llama-graph.cpp +1891 -0
  85. package/cpp/llama-graph.h +810 -0
  86. package/cpp/llama-hparams.cpp +180 -0
  87. package/cpp/llama-hparams.h +233 -0
  88. package/cpp/llama-impl.cpp +167 -0
  89. package/cpp/llama-impl.h +61 -0
  90. package/cpp/llama-io.cpp +15 -0
  91. package/cpp/llama-io.h +35 -0
  92. package/cpp/llama-kv-cache-iswa.cpp +318 -0
  93. package/cpp/llama-kv-cache-iswa.h +135 -0
  94. package/cpp/llama-kv-cache.cpp +2059 -0
  95. package/cpp/llama-kv-cache.h +374 -0
  96. package/cpp/llama-kv-cells.h +491 -0
  97. package/cpp/llama-memory-hybrid.cpp +258 -0
  98. package/cpp/llama-memory-hybrid.h +137 -0
  99. package/cpp/llama-memory-recurrent.cpp +1146 -0
  100. package/cpp/llama-memory-recurrent.h +179 -0
  101. package/cpp/llama-memory.cpp +59 -0
  102. package/cpp/llama-memory.h +119 -0
  103. package/cpp/llama-mmap.cpp +600 -0
  104. package/cpp/llama-mmap.h +68 -0
  105. package/cpp/llama-model-loader.cpp +1164 -0
  106. package/cpp/llama-model-loader.h +170 -0
  107. package/cpp/llama-model-saver.cpp +282 -0
  108. package/cpp/llama-model-saver.h +37 -0
  109. package/cpp/llama-model.cpp +19042 -0
  110. package/cpp/llama-model.h +491 -0
  111. package/cpp/llama-sampling.cpp +2575 -0
  112. package/cpp/llama-sampling.h +32 -0
  113. package/cpp/llama-vocab.cpp +3792 -0
  114. package/cpp/llama-vocab.h +176 -0
  115. package/cpp/llama.cpp +358 -0
  116. package/cpp/llama.h +1373 -0
  117. package/cpp/log.cpp +427 -0
  118. package/cpp/log.h +103 -0
  119. package/cpp/minja/chat-template.hpp +550 -0
  120. package/cpp/minja/minja.hpp +3009 -0
  121. package/cpp/nlohmann/json.hpp +25526 -0
  122. package/cpp/nlohmann/json_fwd.hpp +187 -0
  123. package/cpp/regex-partial.cpp +204 -0
  124. package/cpp/regex-partial.h +56 -0
  125. package/cpp/rn-completion.cpp +681 -0
  126. package/cpp/rn-completion.h +116 -0
  127. package/cpp/rn-llama.cpp +345 -0
  128. package/cpp/rn-llama.h +149 -0
  129. package/cpp/rn-mtmd.hpp +602 -0
  130. package/cpp/rn-tts.cpp +591 -0
  131. package/cpp/rn-tts.h +59 -0
  132. package/cpp/sampling.cpp +579 -0
  133. package/cpp/sampling.h +107 -0
  134. package/cpp/tools/mtmd/clip-impl.h +473 -0
  135. package/cpp/tools/mtmd/clip.cpp +4322 -0
  136. package/cpp/tools/mtmd/clip.h +106 -0
  137. package/cpp/tools/mtmd/miniaudio/miniaudio.h +93468 -0
  138. package/cpp/tools/mtmd/mtmd-audio.cpp +769 -0
  139. package/cpp/tools/mtmd/mtmd-audio.h +47 -0
  140. package/cpp/tools/mtmd/mtmd-helper.cpp +460 -0
  141. package/cpp/tools/mtmd/mtmd-helper.h +91 -0
  142. package/cpp/tools/mtmd/mtmd.cpp +1066 -0
  143. package/cpp/tools/mtmd/mtmd.h +298 -0
  144. package/cpp/tools/mtmd/stb/stb_image.h +7988 -0
  145. package/cpp/unicode-data.cpp +7034 -0
  146. package/cpp/unicode-data.h +20 -0
  147. package/cpp/unicode.cpp +1061 -0
  148. package/cpp/unicode.h +68 -0
  149. package/package.json +2 -1
package/cpp/tools/mtmd/clip.cpp
@@ -0,0 +1,4322 @@
+// NOTE: This is modified from clip.cpp only for LLaVA,
+// so there might still be unnecessary artifacts hanging around
+// I'll gradually clean and extend it
+// Note: Even when using identical normalized image inputs (see normalize_image_u8_to_f32()) we have a significant difference in resulting embeddings compared to pytorch
+#include "clip.h"
+#include "clip-impl.h"
+#include "ggml.h"
+#include "ggml-cpp.h"
+#include "ggml-cpu.h"
+#include "ggml-alloc.h"
+#include "ggml-backend.h"
+#include "gguf.h"
+
+#include <cassert>
+#include <cmath>
+#include <cstdlib>
+#include <cstring>
+#include <fstream>
+#include <map>
+#include <regex>
+#include <stdexcept>
+#include <unordered_set>
+#include <vector>
+#include <sstream>
+#include <cinttypes>
+#include <limits>
+#include <array>
+#include <numeric>
+#include <functional>
+
+struct clip_logger_state g_logger_state = {LM_GGML_LOG_LEVEL_CONT, clip_log_callback_default, NULL};
+
+enum ffn_op_type {
+    FFN_GELU,
+    FFN_GELU_ERF,
+    FFN_SILU,
+    FFN_GELU_QUICK,
+};
+
+enum norm_type {
+    NORM_TYPE_NORMAL,
+    NORM_TYPE_RMS,
+};
+
+//#define CLIP_DEBUG_FUNCTIONS
+
+#ifdef CLIP_DEBUG_FUNCTIONS
+static void clip_image_write_image_to_ppm(const clip_image_u8& img, const std::string& filename) {
+    std::ofstream file(filename, std::ios::binary);
+    if (!file.is_open()) {
+        LOG_ERR("Failed to open file for writing: %s\n", filename.c_str());
+        return;
+    }
+
+    // PPM header: P6 format, width, height, and max color value
+    file << "P6\n" << img.nx << " " << img.ny << "\n255\n";
+
+    // Write pixel data
+    for (size_t i = 0; i < img.buf.size(); i += 3) {
+        // PPM expects binary data in RGB format, which matches our image buffer
+        file.write(reinterpret_cast<const char*>(&img.buf[i]), 3);
+    }
+
+    file.close();
+}
+
+static void clip_image_save_to_bmp(const clip_image_u8& img, const std::string& filename) {
+    std::ofstream file(filename, std::ios::binary);
+    if (!file.is_open()) {
+        LOG_ERR("Failed to open file for writing: %s\n", filename.c_str());
+        return;
+    }
+
+    int fileSize = 54 + 3 * img.nx * img.ny; // File header + info header + pixel data
+    int bytesPerPixel = 3;
+    int widthInBytes = img.nx * bytesPerPixel;
+    int paddingAmount = (4 - (widthInBytes % 4)) % 4;
+    int stride = widthInBytes + paddingAmount;
+
+    // Bitmap file header
+    unsigned char fileHeader[14] = {
+        'B','M',     // Signature
+        0,0,0,0,     // Image file size in bytes
+        0,0,0,0,     // Reserved
+        54,0,0,0     // Start of pixel array
+    };
+
+    // Total file size
+    fileSize = 54 + (stride * img.ny);
+    fileHeader[2] = (unsigned char)(fileSize);
+    fileHeader[3] = (unsigned char)(fileSize >> 8);
+    fileHeader[4] = (unsigned char)(fileSize >> 16);
+    fileHeader[5] = (unsigned char)(fileSize >> 24);
+
+    // Bitmap information header (BITMAPINFOHEADER)
+    unsigned char infoHeader[40] = {
+        40,0,0,0,    // Size of this header (40 bytes)
+        0,0,0,0,     // Image width
+        0,0,0,0,     // Image height
+        1,0,         // Number of color planes
+        24,0,        // Bits per pixel
+        0,0,0,0,     // No compression
+        0,0,0,0,     // Image size (can be 0 for no compression)
+        0,0,0,0,     // X pixels per meter (not specified)
+        0,0,0,0,     // Y pixels per meter (not specified)
+        0,0,0,0,     // Total colors (color table not used)
+        0,0,0,0      // Important colors (all are important)
+    };
+
+    // Width and height in the information header
+    infoHeader[4] = (unsigned char)(img.nx);
+    infoHeader[5] = (unsigned char)(img.nx >> 8);
+    infoHeader[6] = (unsigned char)(img.nx >> 16);
+    infoHeader[7] = (unsigned char)(img.nx >> 24);
+    infoHeader[8] = (unsigned char)(img.ny);
+    infoHeader[9] = (unsigned char)(img.ny >> 8);
+    infoHeader[10] = (unsigned char)(img.ny >> 16);
+    infoHeader[11] = (unsigned char)(img.ny >> 24);
+
+    // Write file headers
+    file.write(reinterpret_cast<char*>(fileHeader), sizeof(fileHeader));
+    file.write(reinterpret_cast<char*>(infoHeader), sizeof(infoHeader));
+
+    // Pixel data
+    std::vector<unsigned char> padding(3, 0); // Max padding size to be added to each row
+    for (int y = img.ny - 1; y >= 0; --y) { // BMP files are stored bottom-to-top
+        for (int x = 0; x < img.nx; ++x) {
+            // Each pixel
+            size_t pixelIndex = (y * img.nx + x) * 3;
+            unsigned char pixel[3] = {
+                img.buf[pixelIndex + 2], // BMP stores pixels in BGR format
+                img.buf[pixelIndex + 1],
+                img.buf[pixelIndex]
+            };
+            file.write(reinterpret_cast<char*>(pixel), 3);
+        }
+        // Write padding for the row
+        file.write(reinterpret_cast<char*>(padding.data()), paddingAmount);
+    }
+
+    file.close();
+}
+
+// debug function to convert f32 to u8
+static void clip_image_convert_f32_to_u8(const clip_image_f32& src, clip_image_u8& dst) {
+    dst.nx = src.nx;
+    dst.ny = src.ny;
+    dst.buf.resize(3 * src.nx * src.ny);
+    for (size_t i = 0; i < src.buf.size(); ++i) {
+        dst.buf[i] = static_cast<uint8_t>(std::min(std::max(int(src.buf[i] * 255.0f), 0), 255));
+    }
+}
+#endif
+
+
+//
+// clip layers
+//
+
+enum patch_merge_type {
+    PATCH_MERGE_FLAT,
+    PATCH_MERGE_SPATIAL_UNPAD,
+};
+
+struct clip_hparams {
+    int32_t image_size;
+    int32_t patch_size;
+    int32_t n_embd;
+    int32_t n_ff;
+    int32_t projection_dim;
+    int32_t n_head;
+    int32_t n_layer;
+    int32_t proj_scale_factor = 0; // idefics3
+
+    float image_mean[3];
+    float image_std[3];
+
+    // for models using dynamic image size, we need to have a smaller image size to warm up
+    // otherwise, the user will get OOM every time they load the model
+    int32_t warmup_image_size = 0;
+    int32_t warmup_audio_size = 3000;
+
+    ffn_op_type ffn_op = FFN_GELU;
+
+    patch_merge_type mm_patch_merge_type = PATCH_MERGE_FLAT;
+
+    float eps = 1e-6;
+    float rope_theta = 0.0;
+
+    std::vector<clip_image_size> image_res_candidates; // for llava-uhd style models
+    int32_t image_crop_resolution;
+    std::unordered_set<int32_t> vision_feature_layer;
+    int32_t attn_window_size = 0;
+    int32_t n_wa_pattern = 0;
+    int32_t spatial_merge_size = 0;
+
+    // audio
+    int32_t n_mel_bins = 0; // whisper preprocessor
+    int32_t proj_stack_factor = 0; // ultravox
+
+    // legacy
+    bool has_llava_projector = false;
+    int minicpmv_version = 0;
+    int32_t minicpmv_query_num = 0; // MiniCPM-V query number
+};
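+
+// (illustrative numbers, not from the file above: with the classic LLaVA-1.5 setup of
+// image_size = 336 and patch_size = 14, the ViT sees a 24x24 grid, i.e. the 576 patch
+// embeddings that the "shape [1, 576, 1024]" comment in the llava projector below
+// refers to)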
+
+struct clip_layer {
+    // attention
+    lm_ggml_tensor * k_w = nullptr;
+    lm_ggml_tensor * k_b = nullptr;
+    lm_ggml_tensor * q_w = nullptr;
+    lm_ggml_tensor * q_b = nullptr;
+    lm_ggml_tensor * v_w = nullptr;
+    lm_ggml_tensor * v_b = nullptr;
+
+    lm_ggml_tensor * o_w = nullptr;
+    lm_ggml_tensor * o_b = nullptr;
+
+    lm_ggml_tensor * k_norm = nullptr;
+    lm_ggml_tensor * q_norm = nullptr;
+
+    // layernorm 1
+    lm_ggml_tensor * ln_1_w = nullptr;
+    lm_ggml_tensor * ln_1_b = nullptr;
+
+    lm_ggml_tensor * ff_up_w = nullptr;
+    lm_ggml_tensor * ff_up_b = nullptr;
+    lm_ggml_tensor * ff_gate_w = nullptr;
+    lm_ggml_tensor * ff_gate_b = nullptr;
+    lm_ggml_tensor * ff_down_w = nullptr;
+    lm_ggml_tensor * ff_down_b = nullptr;
+
+    // layernorm 2
+    lm_ggml_tensor * ln_2_w = nullptr;
+    lm_ggml_tensor * ln_2_b = nullptr;
+
+    // layer scale (no bias)
+    lm_ggml_tensor * ls_1_w = nullptr;
+    lm_ggml_tensor * ls_2_w = nullptr;
+};
+
+struct clip_model {
+    clip_modality modality = CLIP_MODALITY_VISION;
+    projector_type proj_type = PROJECTOR_TYPE_MLP;
+    clip_hparams hparams;
+
+    // embeddings
+    lm_ggml_tensor * class_embedding = nullptr;
+    lm_ggml_tensor * patch_embeddings_0 = nullptr;
+    lm_ggml_tensor * patch_embeddings_1 = nullptr; // second Conv2D kernel when we decouple Conv3D along temporal dimension (Qwen2VL)
+    lm_ggml_tensor * patch_bias = nullptr;
+    lm_ggml_tensor * position_embeddings = nullptr;
+
+    lm_ggml_tensor * pre_ln_w = nullptr;
+    lm_ggml_tensor * pre_ln_b = nullptr;
+
+    std::vector<clip_layer> layers;
+
+    lm_ggml_tensor * post_ln_w;
+    lm_ggml_tensor * post_ln_b;
+
+    lm_ggml_tensor * projection; // TODO: rename it to fc (fully connected layer)
+    lm_ggml_tensor * mm_fc_w;
+    lm_ggml_tensor * mm_fc_b;
+
+    // LLaVA projection
+    lm_ggml_tensor * mm_input_norm_w = nullptr;
+    lm_ggml_tensor * mm_input_norm_b = nullptr;
+    lm_ggml_tensor * mm_0_w = nullptr;
+    lm_ggml_tensor * mm_0_b = nullptr;
+    lm_ggml_tensor * mm_2_w = nullptr;
+    lm_ggml_tensor * mm_2_b = nullptr;
+
+    lm_ggml_tensor * image_newline = nullptr;
+
+    // Yi type models with mlp+normalization projection
+    lm_ggml_tensor * mm_1_w = nullptr; // Yi type models have 0, 1, 3, 4
+    lm_ggml_tensor * mm_1_b = nullptr;
+    lm_ggml_tensor * mm_3_w = nullptr;
+    lm_ggml_tensor * mm_3_b = nullptr;
+    lm_ggml_tensor * mm_4_w = nullptr;
+    lm_ggml_tensor * mm_4_b = nullptr;
+
+    // GLMV-Edge projection
+    lm_ggml_tensor * mm_model_adapter_conv_w = nullptr;
+    lm_ggml_tensor * mm_model_adapter_conv_b = nullptr;
+    lm_ggml_tensor * mm_glm_tok_boi = nullptr;
+    lm_ggml_tensor * mm_glm_tok_eoi = nullptr;
+
+    // MobileVLM projection
+    lm_ggml_tensor * mm_model_mlp_1_w = nullptr;
+    lm_ggml_tensor * mm_model_mlp_1_b = nullptr;
+    lm_ggml_tensor * mm_model_mlp_3_w = nullptr;
+    lm_ggml_tensor * mm_model_mlp_3_b = nullptr;
+    lm_ggml_tensor * mm_model_block_1_block_0_0_w = nullptr;
+    lm_ggml_tensor * mm_model_block_1_block_0_1_w = nullptr;
+    lm_ggml_tensor * mm_model_block_1_block_0_1_b = nullptr;
+    lm_ggml_tensor * mm_model_block_1_block_1_fc1_w = nullptr;
+    lm_ggml_tensor * mm_model_block_1_block_1_fc1_b = nullptr;
+    lm_ggml_tensor * mm_model_block_1_block_1_fc2_w = nullptr;
+    lm_ggml_tensor * mm_model_block_1_block_1_fc2_b = nullptr;
+    lm_ggml_tensor * mm_model_block_1_block_2_0_w = nullptr;
+    lm_ggml_tensor * mm_model_block_1_block_2_1_w = nullptr;
+    lm_ggml_tensor * mm_model_block_1_block_2_1_b = nullptr;
+    lm_ggml_tensor * mm_model_block_2_block_0_0_w = nullptr;
+    lm_ggml_tensor * mm_model_block_2_block_0_1_w = nullptr;
+    lm_ggml_tensor * mm_model_block_2_block_0_1_b = nullptr;
+    lm_ggml_tensor * mm_model_block_2_block_1_fc1_w = nullptr;
+    lm_ggml_tensor * mm_model_block_2_block_1_fc1_b = nullptr;
+    lm_ggml_tensor * mm_model_block_2_block_1_fc2_w = nullptr;
+    lm_ggml_tensor * mm_model_block_2_block_1_fc2_b = nullptr;
+    lm_ggml_tensor * mm_model_block_2_block_2_0_w = nullptr;
+    lm_ggml_tensor * mm_model_block_2_block_2_1_w = nullptr;
+    lm_ggml_tensor * mm_model_block_2_block_2_1_b = nullptr;
+
+    // MobileVLM_V2 projection
+    lm_ggml_tensor * mm_model_mlp_0_w = nullptr;
+    lm_ggml_tensor * mm_model_mlp_0_b = nullptr;
+    lm_ggml_tensor * mm_model_mlp_2_w = nullptr;
+    lm_ggml_tensor * mm_model_mlp_2_b = nullptr;
+    lm_ggml_tensor * mm_model_peg_0_w = nullptr;
+    lm_ggml_tensor * mm_model_peg_0_b = nullptr;
+
+    // MINICPMV projection
+    lm_ggml_tensor * mm_model_pos_embed_k = nullptr;
+    lm_ggml_tensor * mm_model_query = nullptr;
+    lm_ggml_tensor * mm_model_proj = nullptr;
+    lm_ggml_tensor * mm_model_kv_proj = nullptr;
+    lm_ggml_tensor * mm_model_attn_q_w = nullptr;
+    lm_ggml_tensor * mm_model_attn_q_b = nullptr;
+    lm_ggml_tensor * mm_model_attn_k_w = nullptr;
+    lm_ggml_tensor * mm_model_attn_k_b = nullptr;
+    lm_ggml_tensor * mm_model_attn_v_w = nullptr;
+    lm_ggml_tensor * mm_model_attn_v_b = nullptr;
+    lm_ggml_tensor * mm_model_attn_o_w = nullptr;
+    lm_ggml_tensor * mm_model_attn_o_b = nullptr;
+    lm_ggml_tensor * mm_model_ln_q_w = nullptr;
+    lm_ggml_tensor * mm_model_ln_q_b = nullptr;
+    lm_ggml_tensor * mm_model_ln_kv_w = nullptr;
+    lm_ggml_tensor * mm_model_ln_kv_b = nullptr;
+    lm_ggml_tensor * mm_model_ln_post_w = nullptr;
+    lm_ggml_tensor * mm_model_ln_post_b = nullptr;
+
+    // gemma3
+    lm_ggml_tensor * mm_input_proj_w = nullptr;
+    lm_ggml_tensor * mm_soft_emb_norm_w = nullptr;
+
+    // pixtral
+    lm_ggml_tensor * token_embd_img_break = nullptr;
+    lm_ggml_tensor * mm_patch_merger_w = nullptr;
+
+    // ultravox / whisper encoder
+    lm_ggml_tensor * conv1d_1_w = nullptr;
+    lm_ggml_tensor * conv1d_1_b = nullptr;
+    lm_ggml_tensor * conv1d_2_w = nullptr;
+    lm_ggml_tensor * conv1d_2_b = nullptr;
+    lm_ggml_tensor * mm_norm_pre_w = nullptr;
+    lm_ggml_tensor * mm_norm_mid_w = nullptr;
+
+    bool audio_has_avgpool() const {
+        return proj_type == PROJECTOR_TYPE_QWEN2A
+            || proj_type == PROJECTOR_TYPE_VOXTRAL;
+    }
+
+    bool audio_has_stack_frames() const {
+        return proj_type == PROJECTOR_TYPE_ULTRAVOX
+            || proj_type == PROJECTOR_TYPE_VOXTRAL;
+    }
+};
+
+struct clip_ctx {
+    clip_model model;
+
+    lm_gguf_context_ptr ctx_gguf;
+    lm_ggml_context_ptr ctx_data;
+
+    std::vector<uint8_t> buf_compute_meta;
+
+    std::vector<lm_ggml_backend_t> backend_ptrs;
+    std::vector<lm_ggml_backend_buffer_type_t> backend_buft;
+
+    lm_ggml_backend_t backend = nullptr;
+    lm_ggml_backend_t backend_cpu = nullptr;
+    lm_ggml_backend_buffer_ptr buf;
+
+    int max_nodes = 8192;
+    lm_ggml_backend_sched_ptr sched;
+
+    // for debugging
+    bool debug_graph = false;
+    std::vector<lm_ggml_tensor *> debug_print_tensors;
+
+    clip_ctx(clip_context_params & ctx_params) {
+        debug_graph = std::getenv("MTMD_DEBUG_GRAPH") != nullptr;
+        backend_cpu = lm_ggml_backend_init_by_type(LM_GGML_BACKEND_DEVICE_TYPE_CPU, nullptr);
+        if (!backend_cpu) {
+            throw std::runtime_error("failed to initialize CPU backend");
+        }
+        if (ctx_params.use_gpu) {
+            auto backend_name = std::getenv("MTMD_BACKEND_DEVICE");
+            if (backend_name != nullptr) {
+                backend = lm_ggml_backend_init_by_name(backend_name, nullptr);
+                if (!backend) {
+                    LOG_WRN("%s: Warning: Failed to initialize \"%s\" backend, falling back to default GPU backend\n", __func__, backend_name);
+                }
+            }
+            if (!backend) {
+                backend = lm_ggml_backend_init_by_type(LM_GGML_BACKEND_DEVICE_TYPE_GPU, nullptr);
+            }
+        }
+
+        if (backend) {
+            LOG_INF("%s: CLIP using %s backend\n", __func__, lm_ggml_backend_name(backend));
+            backend_ptrs.push_back(backend);
+            backend_buft.push_back(lm_ggml_backend_get_default_buffer_type(backend));
+        } else {
+            backend = backend_cpu;
+            LOG_INF("%s: CLIP using CPU backend\n", __func__);
+        }
+
+        backend_ptrs.push_back(backend_cpu);
+        backend_buft.push_back(lm_ggml_backend_get_default_buffer_type(backend_cpu));
+
+        sched.reset(
+            lm_ggml_backend_sched_new(backend_ptrs.data(), backend_buft.data(), backend_ptrs.size(), 8192, false, true)
+        );
+    }
+
+    ~clip_ctx() {
+        lm_ggml_backend_free(backend);
+        if (backend != backend_cpu) {
+            lm_ggml_backend_free(backend_cpu);
+        }
+    }
+
+    // this function is added so that we don't change too much of the existing code
+    projector_type proj_type() const {
+        return model.proj_type;
+    }
+};
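+
+// rough usage sketch (assuming clip_context_params from clip.h carries the `use_gpu`
+// flag referenced in the constructor above):
+//
+//     clip_context_params params;
+//     params.use_gpu = true;
+//     clip_ctx ctx(params); // MTMD_BACKEND_DEVICE if set, else default GPU, else CPU
+//     // graphs built later are split across ctx.backend_ptrs by ctx.sched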
+
+struct clip_graph {
+    clip_ctx * ctx;
+    const clip_model & model;
+    const clip_hparams & hparams;
+
+    // we only support single image per batch
+    const clip_image_f32 & img;
+
+    const int patch_size;
+    const int n_patches_x;
+    const int n_patches_y;
+    const int n_patches;
+    const int n_embd;
+    const int n_head;
+    const int d_head;
+    const int n_layer;
+    const float eps;
+    const float kq_scale;
+
+    lm_ggml_context_ptr ctx0_ptr;
+    lm_ggml_context * ctx0;
+    lm_ggml_cgraph * gf;
+
+    clip_graph(clip_ctx * ctx, const clip_image_f32 & img) :
+        ctx(ctx),
+        model(ctx->model),
+        hparams(model.hparams),
+        img(img),
+        patch_size(hparams.patch_size),
+        n_patches_x(img.nx / patch_size),
+        n_patches_y(img.ny / patch_size),
+        n_patches(n_patches_x * n_patches_y),
+        n_embd(hparams.n_embd),
+        n_head(hparams.n_head),
+        d_head(n_embd / n_head),
+        n_layer(hparams.n_layer),
+        eps(hparams.eps),
+        kq_scale(1.0f / sqrtf((float)d_head)) {
+        struct lm_ggml_init_params params = {
+            /*.mem_size =*/ ctx->buf_compute_meta.size(),
+            /*.mem_buffer =*/ ctx->buf_compute_meta.data(),
+            /*.no_alloc =*/ true,
+        };
+        ctx0_ptr.reset(lm_ggml_init(params));
+        ctx0 = ctx0_ptr.get();
+        gf = lm_ggml_new_graph_custom(ctx0, ctx->max_nodes, false);
+    }
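+    // note: ctx0 is created with no_alloc = true, so the tensors created by the
+    // build_* methods below only describe the graph; the backend scheduler
+    // allocates the real buffers when the graph is executed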
+
+    lm_ggml_cgraph * build_siglip() {
+        lm_ggml_tensor * inp = build_inp();
+
+        lm_ggml_tensor * learned_pos_embd = model.position_embeddings;
+        if (ctx->proj_type() == PROJECTOR_TYPE_LFM2) {
+            learned_pos_embd = resize_position_embeddings();
+        }
+
+        lm_ggml_tensor * cur = build_vit(
+            inp, n_patches,
+            NORM_TYPE_NORMAL,
+            hparams.ffn_op,
+            learned_pos_embd,
+            nullptr);
+
+        if (ctx->proj_type() == PROJECTOR_TYPE_GEMMA3) {
+            const int batch_size = 1;
+            LM_GGML_ASSERT(n_patches_x == n_patches_y);
+            const int patches_per_image = n_patches_x;
+            const int kernel_size = hparams.proj_scale_factor;
+
+            cur = lm_ggml_transpose(ctx0, cur);
+            cur = lm_ggml_cont_4d(ctx0, cur, patches_per_image, patches_per_image, n_embd, batch_size);
+
+            // doing a pool2d to reduce the number of output tokens
+            cur = lm_ggml_pool_2d(ctx0, cur, LM_GGML_OP_POOL_AVG, kernel_size, kernel_size, kernel_size, kernel_size, 0, 0);
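+            // (illustrative numbers: Gemma 3 is commonly run at 896x896 with patch_size 14,
+            // i.e. a 64x64 patch grid; a kernel_size of 4 then pools it to 16x16 = 256 tokens)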
+            cur = lm_ggml_reshape_3d(ctx0, cur, cur->ne[0] * cur->ne[0], n_embd, batch_size);
+            cur = lm_ggml_cont(ctx0, lm_ggml_transpose(ctx0, cur));
+
+            // apply norm before projection
+            cur = lm_ggml_rms_norm(ctx0, cur, eps);
+            cur = lm_ggml_mul(ctx0, cur, model.mm_soft_emb_norm_w);
+
+            // apply projection
+            cur = lm_ggml_mul_mat(ctx0,
+                lm_ggml_cont(ctx0, lm_ggml_transpose(ctx0, model.mm_input_proj_w)),
+                cur);
+
+        } else if (ctx->proj_type() == PROJECTOR_TYPE_IDEFICS3) {
+            // https://github.com/huggingface/transformers/blob/0a950e0bbe1ed58d5401a6b547af19f15f0c195e/src/transformers/models/idefics3/modeling_idefics3.py#L578
+
+            const int scale_factor = model.hparams.proj_scale_factor;
+            const int n_embd = cur->ne[0];
+            const int seq = cur->ne[1];
+            const int bsz = 1; // batch size, always 1 for now since we don't support batching
+            const int height = std::sqrt(seq);
+            const int width = std::sqrt(seq);
+            LM_GGML_ASSERT(scale_factor != 0);
+            cur = lm_ggml_reshape_4d(ctx0, cur, n_embd * scale_factor, width / scale_factor, height, bsz);
+            cur = lm_ggml_permute(ctx0, cur, 0, 2, 1, 3);
+            cur = lm_ggml_cont_4d(ctx0, cur,
+                n_embd * scale_factor * scale_factor,
+                height / scale_factor,
+                width / scale_factor,
+                bsz);
+            cur = lm_ggml_permute(ctx0, cur, 0, 2, 1, 3);
+            cur = lm_ggml_cont_3d(ctx0, cur,
+                n_embd * scale_factor * scale_factor,
+                seq / (scale_factor * scale_factor),
+                bsz);
+
+            cur = lm_ggml_mul_mat(ctx0, model.projection, cur);
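+            // (net effect, with made-up example numbers: the sequence shrinks by
+            // scale_factor^2 while channels grow by the same factor, e.g. a 24x24 grid of
+            // 1152-dim features with scale_factor 2 becomes a 12x12 grid of 4608-dim
+            // features before the linear projection)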
+        } else if (ctx->proj_type() == PROJECTOR_TYPE_LFM2) {
+            // pixel unshuffle block
+            const int scale_factor = model.hparams.proj_scale_factor;
+            LM_GGML_ASSERT(scale_factor > 1);
+
+            const int n_embd = cur->ne[0];
+            int width = img.nx / patch_size;
+            int height = img.ny / patch_size;
+
+            // pad width and height to factor
+            const int64_t pad_width = CLIP_ALIGN(width, scale_factor) - width;
+            const int64_t pad_height = CLIP_ALIGN(height, scale_factor) - height;
+            cur = lm_ggml_reshape_3d(ctx0, cur, n_embd, width, height);
+            if (pad_width || pad_height) {
+                cur = lm_ggml_pad(ctx0, cur, 0, pad_width, pad_height, 0);
+                width += pad_width;
+                height += pad_height;
+            }
+
+            // unshuffle h
+            cur = lm_ggml_reshape_3d(ctx0, cur, n_embd * scale_factor, width / scale_factor, height);
+            cur = lm_ggml_permute(ctx0, cur, 0, 2, 1, 3);
+
+            // unshuffle w
+            cur = lm_ggml_cont_3d(ctx0, cur, n_embd * scale_factor * scale_factor, height / scale_factor, width / scale_factor);
+            cur = lm_ggml_permute(ctx0, cur, 0, 2, 1, 3);
+
+            cur = lm_ggml_cont_2d(ctx0, cur, cur->ne[0], cur->ne[1] * cur->ne[2]);
+
+            // projection
+            cur = lm_ggml_norm(ctx0, cur, 1e-5); // default nn.LayerNorm
+            cur = lm_ggml_mul(ctx0, cur, model.mm_input_norm_w);
+            cur = lm_ggml_add(ctx0, cur, model.mm_input_norm_b);
+
+            cur = lm_ggml_mul_mat(ctx0, model.mm_1_w, cur);
+            cur = lm_ggml_add(ctx0, cur, model.mm_1_b);
+            cur = lm_ggml_gelu(ctx0, cur);
+            cur = lm_ggml_mul_mat(ctx0, model.mm_2_w, cur);
+            cur = lm_ggml_add(ctx0, cur, model.mm_2_b);
+        } else {
+            LM_GGML_ABORT("SigLIP: Unsupported projector type");
+        }
+
+        // build the graph
+        lm_ggml_build_forward_expand(gf, cur);
+
+        return gf;
+    }
+
+    lm_ggml_cgraph * build_pixtral() {
+        const int n_merge = hparams.spatial_merge_size;
+
+        // 2D input positions
+        lm_ggml_tensor * pos_h = lm_ggml_new_tensor_1d(ctx0, LM_GGML_TYPE_I32, n_patches);
+        lm_ggml_set_name(pos_h, "pos_h");
+        lm_ggml_set_input(pos_h);
+
+        lm_ggml_tensor * pos_w = lm_ggml_new_tensor_1d(ctx0, LM_GGML_TYPE_I32, n_patches);
+        lm_ggml_set_name(pos_w, "pos_w");
+        lm_ggml_set_input(pos_w);
+
+        auto add_pos = [&](lm_ggml_tensor * cur, const clip_layer &) {
+            return build_rope_2d(ctx0, cur, pos_h, pos_w, hparams.rope_theta, true);
+        };
+
+        lm_ggml_tensor * inp = build_inp();
+        lm_ggml_tensor * cur = build_vit(
+            inp, n_patches,
+            NORM_TYPE_RMS,
+            hparams.ffn_op,
+            nullptr, // no learned pos embd
+            add_pos);
+
+        // mistral small 3.1 patch merger
+        // ref: https://github.com/huggingface/transformers/blob/7a3e208892c06a5e278144eaf38c8599a42f53e7/src/transformers/models/mistral3/modeling_mistral3.py#L67
+        if (model.mm_patch_merger_w) {
+            LM_GGML_ASSERT(hparams.spatial_merge_size > 0);
+
+            cur = lm_ggml_mul(ctx0, lm_ggml_rms_norm(ctx0, cur, eps), model.mm_input_norm_w);
+
+            // reshape image tokens to 2D grid
+            cur = lm_ggml_reshape_3d(ctx0, cur, n_embd, n_patches_x, n_patches_y);
+            cur = lm_ggml_permute(ctx0, cur, 2, 0, 1, 3); // [x, y, n_embd]
+            cur = lm_ggml_cont(ctx0, cur);
+
+            // torch.nn.functional.unfold is just an im2col under the hood
+            // we just need a dummy kernel to make it work
+            lm_ggml_tensor * kernel = lm_ggml_view_3d(ctx0, cur, n_merge, n_merge, cur->ne[2], 0, 0, 0);
+            cur = lm_ggml_im2col(ctx0, kernel, cur, n_merge, n_merge, 0, 0, 1, 1, true, inp->type);
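+            // (with stride == kernel == n_merge the windows don't overlap, so each im2col
+            // column stacks one n_merge x n_merge block of patch embeddings, i.e.
+            // n_embd * n_merge^2 values, which the matmul below folds back to the
+            // projector's input width)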
+
+            // project to n_embd
+            cur = lm_ggml_reshape_2d(ctx0, cur, cur->ne[0], cur->ne[1] * cur->ne[2]);
+            cur = lm_ggml_mul_mat(ctx0, model.mm_patch_merger_w, cur);
+        }
+
+        // LlavaMultiModalProjector (always using GELU activation)
+        {
+            cur = lm_ggml_mul_mat(ctx0, model.mm_1_w, cur);
+            if (model.mm_1_b) {
+                cur = lm_ggml_add(ctx0, cur, model.mm_1_b);
+            }
+
+            cur = lm_ggml_gelu(ctx0, cur);
+            cur = lm_ggml_mul_mat(ctx0, model.mm_2_w, cur);
+            if (model.mm_2_b) {
+                cur = lm_ggml_add(ctx0, cur, model.mm_2_b);
+            }
+        }
+
+        // arrangement of the [IMG_BREAK] token
+        {
+            // not efficient, but works
+            // the trick is to view the embeddings as a 3D tensor with shape [n_embd, n_patches_per_row, n_rows]
+            // and then concatenate the [IMG_BREAK] token to the end of each row, aka n_patches_per_row dimension
+            // after the concatenation, we have a tensor with shape [n_embd, n_patches_per_row + 1, n_rows]
+
+            const int p_y = n_merge > 0 ? n_patches_y / n_merge : n_patches_y;
+            const int p_x = n_merge > 0 ? n_patches_x / n_merge : n_patches_x;
+            const int p_total = p_x * p_y;
+            const int n_embd_text = cur->ne[0];
+            const int n_tokens_output = p_total + p_y - 1; // one [IMG_BREAK] per row, except the last row
+
+            lm_ggml_tensor * tmp = lm_ggml_reshape_3d(ctx0, cur, n_embd_text, p_x, p_y);
+            lm_ggml_tensor * tok = lm_ggml_new_tensor_3d(ctx0, tmp->type, n_embd_text, 1, p_y);
+            tok = lm_ggml_scale(ctx0, tok, 0.0); // clear the tensor
+            tok = lm_ggml_add(ctx0, tok, model.token_embd_img_break);
+            tmp = lm_ggml_concat(ctx0, tmp, tok, 1);
+            cur = lm_ggml_view_2d(ctx0, tmp,
+                n_embd_text, n_tokens_output,
+                lm_ggml_row_size(tmp->type, n_embd_text), 0);
+        }
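+        // (worked example: a 16x16 grid of merged patches yields 256 + 16 - 1 = 271
+        // output embeddings -- 16 rows with an [IMG_BREAK] after each row except the
+        // last; the 2D view above simply drops the trailing break token)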
+
+        // build the graph
+        lm_ggml_build_forward_expand(gf, cur);
+
+        return gf;
+    }
+
+    // Qwen2VL and Qwen2.5VL use M-RoPE
+    lm_ggml_cgraph * build_qwen2vl() {
+        LM_GGML_ASSERT(model.patch_bias == nullptr);
+        LM_GGML_ASSERT(model.class_embedding == nullptr);
+
+        const int batch_size = 1;
+        const bool use_window_attn = hparams.n_wa_pattern > 0;
+        const int n_wa_pattern = hparams.n_wa_pattern;
+        const int n_pos = n_patches;
+        const int num_position_ids = n_pos * 4; // m-rope requires 4 dim per position
+
+        norm_type norm_t = ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL
+            ? NORM_TYPE_RMS // qwen 2.5 vl
+            : NORM_TYPE_NORMAL; // qwen 2 vl
+
+        int mrope_sections[4] = {d_head/4, d_head/4, d_head/4, d_head/4};
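+        // (e.g. with d_head = 80 this is {20, 20, 20, 20}: each quarter of the rotary
+        // dimensions is driven by one of the four position components supplied through
+        // the `positions` input below, hence num_position_ids = n_pos * 4)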
+
+        lm_ggml_tensor * inp_raw = build_inp_raw();
+        lm_ggml_tensor * inp = lm_ggml_conv_2d(ctx0, model.patch_embeddings_0, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
+
+        LM_GGML_ASSERT(img.nx % (patch_size * 2) == 0);
+        LM_GGML_ASSERT(img.ny % (patch_size * 2) == 0);
+
+        // second conv dimension
+        {
+            auto inp_1 = lm_ggml_conv_2d(ctx0, model.patch_embeddings_1, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
+            inp = lm_ggml_add(ctx0, inp, inp_1);
+
+            inp = lm_ggml_permute(ctx0, inp, 1, 2, 0, 3); // [w, h, c, b] -> [c, w, h, b]
+            inp = lm_ggml_cont_4d(
+                ctx0, inp,
+                n_embd * 2, n_patches_x / 2, n_patches_y, batch_size);
+            inp = lm_ggml_reshape_4d(
+                ctx0, inp,
+                n_embd * 2, n_patches_x / 2, 2, batch_size * (n_patches_y / 2));
+            inp = lm_ggml_permute(ctx0, inp, 0, 2, 1, 3);
+            inp = lm_ggml_cont_3d(
+                ctx0, inp,
+                n_embd, n_patches_x * n_patches_y, batch_size);
+        }
+
+        lm_ggml_tensor * inpL = inp;
+        lm_ggml_tensor * window_mask = nullptr;
+        lm_ggml_tensor * window_idx = nullptr;
+        lm_ggml_tensor * inv_window_idx = nullptr;
+
+        lm_ggml_tensor * positions = lm_ggml_new_tensor_1d(ctx0, LM_GGML_TYPE_I32, num_position_ids);
+        lm_ggml_set_name(positions, "positions");
+        lm_ggml_set_input(positions);
+
+        // pre-layernorm
+        if (model.pre_ln_w) {
+            inpL = build_norm(inpL, model.pre_ln_w, model.pre_ln_b, norm_t, eps, -1);
+        }
+
+        if (use_window_attn) {
+            // handle window attention inputs
+            inv_window_idx = lm_ggml_new_tensor_1d(ctx0, LM_GGML_TYPE_I32, n_pos / 4);
+            lm_ggml_set_name(inv_window_idx, "inv_window_idx");
+            lm_ggml_set_input(inv_window_idx);
+            // mask for window attention
+            window_mask = lm_ggml_new_tensor_2d(ctx0, LM_GGML_TYPE_F32, n_pos, n_pos);
+            lm_ggml_set_name(window_mask, "window_mask");
+            lm_ggml_set_input(window_mask);
+
+            // inpL shape: [n_embd, n_patches_x * n_patches_y, batch_size]
+            LM_GGML_ASSERT(batch_size == 1);
+            inpL = lm_ggml_reshape_2d(ctx0, inpL, n_embd * 4, n_patches_x * n_patches_y * batch_size / 4);
+            inpL = lm_ggml_get_rows(ctx0, inpL, inv_window_idx);
+            inpL = lm_ggml_reshape_3d(ctx0, inpL, n_embd, n_patches_x * n_patches_y, batch_size);
+        }
+
+        // loop over layers
+        for (int il = 0; il < n_layer; il++) {
+            auto & layer = model.layers[il];
+            const bool full_attn = use_window_attn ? (il + 1) % n_wa_pattern == 0 : true;
+
+            lm_ggml_tensor * cur = inpL; // inpL = residual, cur = hidden_states
+
+            // layernorm1
+            cur = build_norm(cur, layer.ln_1_w, layer.ln_1_b, norm_t, eps, il);
+            cb(cur, "ln1", il);
+
+            // self-attention
+            {
+                lm_ggml_tensor * Qcur = lm_ggml_add(ctx0,
+                    lm_ggml_mul_mat(ctx0, layer.q_w, cur), layer.q_b);
+                lm_ggml_tensor * Kcur = lm_ggml_add(ctx0,
+                    lm_ggml_mul_mat(ctx0, layer.k_w, cur), layer.k_b);
+                lm_ggml_tensor * Vcur = lm_ggml_add(ctx0,
+                    lm_ggml_mul_mat(ctx0, layer.v_w, cur), layer.v_b);
+
+                Qcur = lm_ggml_reshape_3d(ctx0, Qcur, d_head, n_head, n_patches);
+                Kcur = lm_ggml_reshape_3d(ctx0, Kcur, d_head, n_head, n_patches);
+                Vcur = lm_ggml_reshape_3d(ctx0, Vcur, d_head, n_head, n_patches);
+
+                cb(Qcur, "Qcur", il);
+                cb(Kcur, "Kcur", il);
+                cb(Vcur, "Vcur", il);
+
+                // apply M-RoPE
+                Qcur = lm_ggml_rope_multi(
+                    ctx0, Qcur, positions, nullptr,
+                    d_head/2, mrope_sections, LM_GGML_ROPE_TYPE_VISION, 32768, 10000, 1, 0, 1, 32, 1);
+                Kcur = lm_ggml_rope_multi(
+                    ctx0, Kcur, positions, nullptr,
+                    d_head/2, mrope_sections, LM_GGML_ROPE_TYPE_VISION, 32768, 10000, 1, 0, 1, 32, 1);
+
+                cb(Qcur, "Qcur_rope", il);
+                cb(Kcur, "Kcur_rope", il);
+
+                lm_ggml_tensor * attn_mask = full_attn ? nullptr : window_mask;
+
+                cur = build_attn(layer.o_w, layer.o_b,
+                    Qcur, Kcur, Vcur, attn_mask, kq_scale, il);
+                cb(cur, "attn_out", il);
+            }
+
+            // re-add the layer input, i.e., the residual
+            cur = lm_ggml_add(ctx0, cur, inpL);
+
+            inpL = cur; // inpL = residual, cur = hidden_states
+
+            cb(cur, "ffn_inp", il);
+
+            // layernorm2
+            cur = build_norm(cur, layer.ln_2_w, layer.ln_2_b, norm_t, eps, il);
+            cb(cur, "ffn_inp_normed", il);
+
+            // ffn
+            cur = build_ffn(cur,
+                layer.ff_up_w, layer.ff_up_b,
+                layer.ff_gate_w, layer.ff_gate_b,
+                layer.ff_down_w, layer.ff_down_b,
+                hparams.ffn_op, il);
+
+            cb(cur, "ffn_out", il);
+
+            // residual 2
+            cur = lm_ggml_add(ctx0, inpL, cur);
+            cb(cur, "layer_out", il);
+
+            inpL = cur;
+        }
+
+        // post-layernorm
+        if (model.post_ln_w) {
+            inpL = build_norm(inpL, model.post_ln_w, model.post_ln_b, norm_t, eps, n_layer);
+        }
+
+        // multimodal projection
+        lm_ggml_tensor * embeddings = inpL;
+        embeddings = lm_ggml_reshape_3d(ctx0, embeddings, n_embd * 4, n_pos / 4, batch_size);
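+        // (2x2 spatial merge: four neighbouring patch embeddings are concatenated into an
+        // n_embd * 4 vector and pushed through the MLP below, so e.g. 1024 ViT patches
+        // come out as 256 final image tokens)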
+
+        embeddings = lm_ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
+        embeddings = lm_ggml_add(ctx0, embeddings, model.mm_0_b);
+
+        // GELU activation
+        embeddings = lm_ggml_gelu(ctx0, embeddings);
+
+        // Second linear layer
+        embeddings = lm_ggml_mul_mat(ctx0, model.mm_1_w, embeddings);
+        embeddings = lm_ggml_add(ctx0, embeddings, model.mm_1_b);
+
+        if (use_window_attn) {
+            window_idx = lm_ggml_new_tensor_1d(ctx0, LM_GGML_TYPE_I32, n_pos / 4);
+            lm_ggml_set_name(window_idx, "window_idx");
+            lm_ggml_set_input(window_idx);
+
+            // embeddings shape: [n_embd, n_patches_x * n_patches_y, batch_size]
+            LM_GGML_ASSERT(batch_size == 1);
+            embeddings = lm_ggml_reshape_2d(ctx0, embeddings, hparams.projection_dim, n_patches_x * n_patches_y / 4);
+            embeddings = lm_ggml_get_rows(ctx0, embeddings, window_idx);
+            embeddings = lm_ggml_reshape_3d(ctx0, embeddings, hparams.projection_dim, n_patches_x * n_patches_y / 4, batch_size);
+        }
+
+        // build the graph
+        lm_ggml_build_forward_expand(gf, embeddings);
+
+        return gf;
+    }
+
+    lm_ggml_cgraph * build_minicpmv() {
+        const int batch_size = 1;
+
+        LM_GGML_ASSERT(model.class_embedding == nullptr);
+        const int n_pos = n_patches;
+
+        // position embeddings for the projector (not for ViT)
+        int n_output_dim = clip_n_mmproj_embd(ctx);
+        lm_ggml_tensor * pos_embed = lm_ggml_new_tensor_3d(ctx0, LM_GGML_TYPE_F32, n_output_dim, n_pos, batch_size);
+        lm_ggml_set_name(pos_embed, "pos_embed");
+        lm_ggml_set_input(pos_embed);
+
+        // for selecting learned pos embd, used by ViT
+        struct lm_ggml_tensor * positions = lm_ggml_new_tensor_1d(ctx0, LM_GGML_TYPE_I32, n_pos);
+        lm_ggml_set_name(positions, "positions");
+        lm_ggml_set_input(positions);
+
+        lm_ggml_tensor * learned_pos_embd = lm_ggml_get_rows(ctx0, model.position_embeddings, positions);
+
+        lm_ggml_tensor * inp = build_inp();
+        lm_ggml_tensor * embeddings = build_vit(
+            inp, n_patches,
+            NORM_TYPE_NORMAL,
+            hparams.ffn_op,
+            learned_pos_embd,
+            nullptr);
+
+        // resampler projector (it is just another transformer)
+
+        lm_ggml_tensor * q = model.mm_model_query;
+        lm_ggml_tensor * v = lm_ggml_mul_mat(ctx0, model.mm_model_kv_proj, embeddings);
+
+        // norm
+        q = build_norm(q, model.mm_model_ln_q_w, model.mm_model_ln_q_b, NORM_TYPE_NORMAL, eps, -1);
+        v = build_norm(v, model.mm_model_ln_kv_w, model.mm_model_ln_kv_b, NORM_TYPE_NORMAL, eps, -1);
+
+        // k = v + pos_embed
+        lm_ggml_tensor * k = lm_ggml_add(ctx0, v, pos_embed);
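+        // (the resampler compresses a variable number of patch embeddings into a fixed
+        // set of learned queries -- minicpmv_query_num, e.g. 64 or 96 depending on the
+        // model version -- so the LLM always receives the same number of image tokens)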
+
+        // attention
+        {
+            int n_embd = clip_n_mmproj_embd(ctx);
+            const int d_head = 128;
+            int n_head = n_embd/d_head;
+            // Use actual config value if available, otherwise fall back to hardcoded values
+            int num_query = ctx->model.hparams.minicpmv_query_num;
+            lm_ggml_tensor * Q = lm_ggml_add(ctx0,
+                lm_ggml_mul_mat(ctx0, model.mm_model_attn_q_w, q),
+                model.mm_model_attn_q_b);
+            lm_ggml_tensor * K = lm_ggml_add(ctx0,
+                lm_ggml_mul_mat(ctx0, model.mm_model_attn_k_w, k),
+                model.mm_model_attn_k_b);
+            lm_ggml_tensor * V = lm_ggml_add(ctx0,
+                lm_ggml_mul_mat(ctx0, model.mm_model_attn_v_w, v),
+                model.mm_model_attn_v_b);
+
+            Q = lm_ggml_reshape_3d(ctx0, Q, d_head, n_head, num_query);
+            K = lm_ggml_reshape_3d(ctx0, K, d_head, n_head, n_pos);
+            V = lm_ggml_reshape_3d(ctx0, V, d_head, n_head, n_pos);
+
+            cb(Q, "resampler_Q", -1);
+            cb(K, "resampler_K", -1);
+            cb(V, "resampler_V", -1);
+
+            embeddings = build_attn(
+                model.mm_model_attn_o_w,
+                model.mm_model_attn_o_b,
+                Q, K, V, nullptr, kq_scale, -1);
+            cb(embeddings, "resampler_attn_out", -1);
+        }
+        // layernorm
+        embeddings = build_norm(embeddings, model.mm_model_ln_post_w, model.mm_model_ln_post_b, NORM_TYPE_NORMAL, eps, -1);
+
+        // projection
+        embeddings = lm_ggml_mul_mat(ctx0, model.mm_model_proj, embeddings);
+
+        // build the graph
+        lm_ggml_build_forward_expand(gf, embeddings);
+
+        return gf;
+    }
+
+    lm_ggml_cgraph * build_internvl() {
+        LM_GGML_ASSERT(model.class_embedding != nullptr);
+        LM_GGML_ASSERT(model.position_embeddings != nullptr);
+
+        const int n_pos = n_patches + 1;
+        lm_ggml_tensor * inp = build_inp();
+
+        // add CLS token
+        inp = lm_ggml_concat(ctx0, inp, model.class_embedding, 1);
+
+        // The larger models use a different ViT, which uses RMS norm instead of layer norm
+        // ref: https://github.com/ggml-org/llama.cpp/pull/13443#issuecomment-2869786188
+        norm_type norm_t = (hparams.n_embd == 3200 && hparams.n_layer == 45)
+            ? NORM_TYPE_RMS // 6B ViT (Used by InternVL 2.5/3 - 26B, 38B, 78B)
+            : NORM_TYPE_NORMAL; // 300M ViT (Used by all smaller InternVL models)
+
+        lm_ggml_tensor * cur = build_vit(
+            inp, n_pos,
+            norm_t,
+            hparams.ffn_op,
+            model.position_embeddings,
+            nullptr);
+
+        // remove CLS token
+        cur = lm_ggml_view_2d(ctx0, cur,
+            n_embd, n_patches,
+            lm_ggml_row_size(cur->type, n_embd), 0);
+
+        // pixel shuffle
+        {
+            const int scale_factor = model.hparams.proj_scale_factor;
+            const int bsz = 1; // batch size, always 1 for now since we don't support batching
+            const int height = n_patches_y;
+            const int width = n_patches_x;
+            LM_GGML_ASSERT(scale_factor > 0);
+            cur = lm_ggml_reshape_4d(ctx0, cur, n_embd * scale_factor, height / scale_factor, width, bsz);
+            cur = lm_ggml_permute(ctx0, cur, 0, 2, 1, 3);
+            cur = lm_ggml_cont_4d(ctx0, cur,
+                n_embd * scale_factor * scale_factor,
+                height / scale_factor,
+                width / scale_factor,
+                bsz);
+            cur = lm_ggml_permute(ctx0, cur, 0, 2, 1, 3);
+            // flatten to 2D
+            cur = lm_ggml_cont_2d(ctx0, cur,
+                n_embd * scale_factor * scale_factor,
+                cur->ne[1] * cur->ne[2]);
+        }
+
+        // projector (always using GELU activation)
+        {
+            // projector LayerNorm uses pytorch's default eps = 1e-5
+            // ref: https://huggingface.co/OpenGVLab/InternVL3-8B-Instruct/blob/a34d3e4e129a5856abfd6aa6de79776484caa14e/modeling_internvl_chat.py#L79
+            cur = build_norm(cur, model.mm_0_w, model.mm_0_b, NORM_TYPE_NORMAL, 1e-5, -1);
+            cur = lm_ggml_mul_mat(ctx0, model.mm_1_w, cur);
+            cur = lm_ggml_add(ctx0, cur, model.mm_1_b);
+            cur = lm_ggml_gelu(ctx0, cur);
+            cur = lm_ggml_mul_mat(ctx0, model.mm_3_w, cur);
+            cur = lm_ggml_add(ctx0, cur, model.mm_3_b);
+        }
+
+        // build the graph
+        lm_ggml_build_forward_expand(gf, cur);
+
+        return gf;
+    }
+
+    lm_ggml_cgraph * build_llama4() {
+        LM_GGML_ASSERT(model.class_embedding != nullptr);
+        LM_GGML_ASSERT(model.position_embeddings != nullptr);
+
+        const int n_pos = n_patches + 1; // +1 for [CLS]
+
+        // 2D input positions
+        lm_ggml_tensor * pos_h = lm_ggml_new_tensor_1d(ctx0, LM_GGML_TYPE_I32, n_pos);
+        lm_ggml_set_name(pos_h, "pos_h");
+        lm_ggml_set_input(pos_h);
+
+        lm_ggml_tensor * pos_w = lm_ggml_new_tensor_1d(ctx0, LM_GGML_TYPE_I32, n_pos);
+        lm_ggml_set_name(pos_w, "pos_w");
+        lm_ggml_set_input(pos_w);
+
+        lm_ggml_tensor * inp = build_inp_raw();
+
+        // Llama4UnfoldConvolution
+        {
+            lm_ggml_tensor * kernel = lm_ggml_reshape_4d(ctx0, model.patch_embeddings_0,
+                patch_size, patch_size, 3, n_embd);
+            inp = lm_ggml_im2col(ctx0, kernel, inp, patch_size, patch_size, 0, 0, 1, 1, true, inp->type);
+            inp = lm_ggml_mul_mat(ctx0, model.patch_embeddings_0, inp);
+            inp = lm_ggml_reshape_2d(ctx0, inp, n_embd, n_patches);
+            cb(inp, "patch_conv", -1);
+        }
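+        // (unfold + matmul is equivalent to a Conv2D with stride == kernel == patch_size:
+        // im2col lays each patch_size x patch_size x 3 patch out as a column, and the
+        // matmul with the flattened kernel applies the patch embedding to every column)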
+
+        // add CLS token
+        inp = lm_ggml_concat(ctx0, inp, model.class_embedding, 1);
+
+        // build ViT with 2D position embeddings
+        auto add_pos = [&](lm_ggml_tensor * cur, const clip_layer &) {
+            // first half is X axis and second half is Y axis
+            // ref: https://github.com/huggingface/transformers/blob/40a493c7ed4f19f08eadb0639cf26d49bfa5e180/src/transformers/models/llama4/modeling_llama4.py#L1312
+            // ref: https://github.com/Blaizzy/mlx-vlm/blob/a57156aa87b33cca6e5ee6cfc14dd4ef8f611be6/mlx_vlm/models/llama4/vision.py#L441
+            return build_rope_2d(ctx0, cur, pos_w, pos_h, hparams.rope_theta, false);
+        };
+        lm_ggml_tensor * cur = build_vit(
+            inp, n_pos,
+            NORM_TYPE_NORMAL,
+            hparams.ffn_op,
+            model.position_embeddings,
+            add_pos);
+
+        // remove CLS token
+        cur = lm_ggml_view_2d(ctx0, cur,
+            n_embd, n_patches,
+            lm_ggml_row_size(cur->type, n_embd), 0);
+
+        // pixel shuffle
+        // based on Llama4VisionPixelShuffleMLP
+        // https://github.com/huggingface/transformers/blob/2932f318a20d9e54cc7aea052e040164d85de7d6/src/transformers/models/llama4/modeling_llama4.py#L1151
+        {
+            const int scale_factor = model.hparams.proj_scale_factor;
+            const int bsz = 1; // batch size, always 1 for now since we don't support batching
+            LM_GGML_ASSERT(scale_factor > 0);
+            LM_GGML_ASSERT(n_patches_x == n_patches_y); // llama4 only supports square images
+            cur = lm_ggml_reshape_4d(ctx0, cur,
+                n_embd * scale_factor,
+                n_patches_x / scale_factor,
+                n_patches_y,
+                bsz);
+            cur = lm_ggml_permute(ctx0, cur, 0, 2, 1, 3);
+            cur = lm_ggml_cont_4d(ctx0, cur,
+                n_embd * scale_factor * scale_factor,
+                n_patches_x / scale_factor,
+                n_patches_y / scale_factor,
+                bsz);
+            cur = lm_ggml_permute(ctx0, cur, 0, 2, 1, 3);
+            // flatten to 2D
+            cur = lm_ggml_cont_2d(ctx0, cur,
+                n_embd * scale_factor * scale_factor,
+                n_patches / scale_factor / scale_factor);
+            cb(cur, "pixel_shuffle", -1);
+        }
+
+        // based on Llama4VisionMLP2 (always uses GELU activation, no bias)
+        {
+            cur = lm_ggml_mul_mat(ctx0, model.mm_model_mlp_1_w, cur);
+            cur = lm_ggml_gelu(ctx0, cur);
+            cur = lm_ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, cur);
+            cur = lm_ggml_gelu(ctx0, cur);
+            cb(cur, "adapter_mlp", -1);
+        }
+
+        // Llama4MultiModalProjector
+        cur = lm_ggml_mul_mat(ctx0, model.mm_model_proj, cur);
+        cb(cur, "projected", -1);
+
+        // build the graph
+        lm_ggml_build_forward_expand(gf, cur);
+
+        return gf;
+    }
+
+    // this graph is used by llava, granite and glm
+    // due to having embedding_stack (used by granite), we cannot reuse build_vit
+    lm_ggml_cgraph * build_llava() {
+        const int batch_size = 1;
+        const int n_pos = n_patches + (model.class_embedding ? 1 : 0);
+
+        LM_GGML_ASSERT(n_patches_x == n_patches_y && "only square images supported");
+
+        // Calculate the deepest feature layer based on hparams and projector type
+        int max_feature_layer = n_layer;
+        {
+            // Get the index of the second to last layer; this is the default for models that have a llava projector
+            int il_last = hparams.n_layer - 1;
+            int deepest_feature_layer = -1;
+
+            if (ctx->proj_type() == PROJECTOR_TYPE_MINICPMV || ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE) {
+                il_last += 1;
+            }
+
+            // If we set explicit vision feature layers, only go up to the deepest one
+            // NOTE: only used by granite-vision models for now
+            for (const auto & feature_layer : hparams.vision_feature_layer) {
+                if (feature_layer > deepest_feature_layer) {
+                    deepest_feature_layer = feature_layer;
+                }
+            }
+            max_feature_layer = deepest_feature_layer < 0 ? il_last : deepest_feature_layer;
+        }
+
+        lm_ggml_tensor * inp = build_inp();
+
+        // concat class_embeddings and patch_embeddings
+        if (model.class_embedding) {
+            inp = lm_ggml_concat(ctx0, inp, model.class_embedding, 1);
+        }
+
+        lm_ggml_tensor * positions = lm_ggml_new_tensor_1d(ctx0, LM_GGML_TYPE_I32, n_pos);
+        lm_ggml_set_name(positions, "positions");
+        lm_ggml_set_input(positions);
+
+        inp = lm_ggml_add(ctx0, inp, lm_ggml_get_rows(ctx0, model.position_embeddings, positions));
+
+        lm_ggml_tensor * inpL = inp;
+
+        // pre-layernorm
+        if (model.pre_ln_w) {
+            inpL = build_norm(inpL, model.pre_ln_w, model.pre_ln_b, NORM_TYPE_NORMAL, eps, -1);
+            cb(inpL, "pre_ln", -1);
+        }
+
+        std::vector<lm_ggml_tensor *> embedding_stack;
+        const auto & vision_feature_layer = hparams.vision_feature_layer;
+
+        // loop over layers
+        for (int il = 0; il < max_feature_layer; il++) {
+            auto & layer = model.layers[il];
+            lm_ggml_tensor * cur = inpL; // inpL = residual, cur = hidden_states
+
+            // If this is an embedding feature layer, save the output.
+            // NOTE: 0 index here refers to the input to the encoder.
+            if (vision_feature_layer.find(il) != vision_feature_layer.end()) {
+                embedding_stack.push_back(cur);
+            }
+
+            // layernorm1
+            cur = build_norm(cur, layer.ln_1_w, layer.ln_1_b, NORM_TYPE_NORMAL, eps, il);
+            cb(cur, "layer_inp_normed", il);
+
+            // self-attention
+            {
+                lm_ggml_tensor * Qcur = lm_ggml_mul_mat(ctx0, layer.q_w, cur);
+                if (layer.q_b) {
+                    Qcur = lm_ggml_add(ctx0, Qcur, layer.q_b);
+                }
+
+                lm_ggml_tensor * Kcur = lm_ggml_mul_mat(ctx0, layer.k_w, cur);
+                if (layer.k_b) {
+                    Kcur = lm_ggml_add(ctx0, Kcur, layer.k_b);
+                }
+
+                lm_ggml_tensor * Vcur = lm_ggml_mul_mat(ctx0, layer.v_w, cur);
+                if (layer.v_b) {
+                    Vcur = lm_ggml_add(ctx0, Vcur, layer.v_b);
+                }
+
+                Qcur = lm_ggml_reshape_3d(ctx0, Qcur, d_head, n_head, n_pos);
+                Kcur = lm_ggml_reshape_3d(ctx0, Kcur, d_head, n_head, n_pos);
+                Vcur = lm_ggml_reshape_3d(ctx0, Vcur, d_head, n_head, n_pos);
+
+                cb(Qcur, "Qcur", il);
+                cb(Kcur, "Kcur", il);
+                cb(Vcur, "Vcur", il);
+
+                cur = build_attn(layer.o_w, layer.o_b,
+                    Qcur, Kcur, Vcur, nullptr, kq_scale, il);
+                cb(cur, "attn_out", il);
+            }
+
+            // re-add the layer input, i.e., the residual
+            cur = lm_ggml_add(ctx0, cur, inpL);
+
+            inpL = cur; // inpL = residual, cur = hidden_states
+
+            cb(cur, "ffn_inp", il);
+
+            // layernorm2
+            cur = build_norm(cur, layer.ln_2_w, layer.ln_2_b, NORM_TYPE_NORMAL, eps, il);
+            cb(cur, "ffn_inp_normed", il);
+
+            // ffn
+            cur = build_ffn(cur,
+                layer.ff_up_w, layer.ff_up_b,
+                layer.ff_gate_w, layer.ff_gate_b,
+                layer.ff_down_w, layer.ff_down_b,
+                hparams.ffn_op, il);
+
+            cb(cur, "ffn_out", il);
+
+            // residual 2
+            cur = lm_ggml_add(ctx0, inpL, cur);
+            cb(cur, "layer_out", il);
+
+            inpL = cur;
+        }
+
+        // post-layernorm
+        if (model.post_ln_w) {
+            inpL = build_norm(inpL, model.post_ln_w, model.post_ln_b, NORM_TYPE_NORMAL, eps, -1);
+        }
+
+        lm_ggml_tensor * embeddings = inpL;
+
+        // process vision feature layers (used by granite)
+        {
+            // final layer is a vision feature layer
+            if (vision_feature_layer.find(max_feature_layer) != vision_feature_layer.end()) {
+                embedding_stack.push_back(inpL);
+            }
+
+            // If feature layers are explicitly set, stack them (if we have multiple)
+            if (!embedding_stack.empty()) {
+                embeddings = embedding_stack[0];
+                for (size_t i = 1; i < embedding_stack.size(); i++) {
+                    embeddings = lm_ggml_concat(ctx0, embeddings, embedding_stack[i], 0);
+                }
+            }
+        }
+
+        // llava projector (also used by granite)
+        if (ctx->model.hparams.has_llava_projector) {
+            embeddings = lm_ggml_reshape_2d(ctx0, embeddings, embeddings->ne[0], embeddings->ne[1]);
+
+            lm_ggml_tensor * patches = lm_ggml_new_tensor_1d(ctx0, LM_GGML_TYPE_I32, n_patches);
+            lm_ggml_set_name(patches, "patches");
+            lm_ggml_set_input(patches);
+
+            // shape [1, 576, 1024]
+            // ne is whcn, ne = [1024, 576, 1, 1]
+            embeddings = lm_ggml_get_rows(ctx0, embeddings, patches);
+
+            // print_tensor_info(embeddings, "embeddings");
+
+            // llava projector
+            if (ctx->proj_type() == PROJECTOR_TYPE_MLP) {
+                embeddings = lm_ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
+                embeddings = lm_ggml_add(ctx0, embeddings, model.mm_0_b);
+
+                embeddings = lm_ggml_gelu(ctx0, embeddings);
+                if (model.mm_2_w) {
+                    embeddings = lm_ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
+                    embeddings = lm_ggml_add(ctx0, embeddings, model.mm_2_b);
+                }
+            }
+            else if (ctx->proj_type() == PROJECTOR_TYPE_MLP_NORM) {
+                embeddings = lm_ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
+                embeddings = lm_ggml_add(ctx0, embeddings, model.mm_0_b);
+                // lm_ggml_tensor_printf(embeddings, "mm_0_w",0,true,false);
+                // First LayerNorm
+                embeddings = lm_ggml_norm(ctx0, embeddings, eps);
+                embeddings = lm_ggml_add(ctx0, lm_ggml_mul(ctx0, embeddings, model.mm_1_w),
+                    model.mm_1_b);
+
+                // GELU activation
+                embeddings = lm_ggml_gelu(ctx0, embeddings);
+
+                // Second linear layer
+                embeddings = lm_ggml_mul_mat(ctx0, model.mm_3_w, embeddings);
+                embeddings = lm_ggml_add(ctx0, embeddings, model.mm_3_b);
+
+                // Second LayerNorm
+                embeddings = lm_ggml_norm(ctx0, embeddings, eps);
+                embeddings = lm_ggml_add(ctx0, lm_ggml_mul(ctx0, embeddings, model.mm_4_w),
+                    model.mm_4_b);
+            }
+            else if (ctx->proj_type() == PROJECTOR_TYPE_LDP) {
+                // MobileVLM projector
+                int n_patch = 24;
+                lm_ggml_tensor * mlp_1 = lm_ggml_mul_mat(ctx0, model.mm_model_mlp_1_w, embeddings);
+                mlp_1 = lm_ggml_add(ctx0, mlp_1, model.mm_model_mlp_1_b);
+                mlp_1 = lm_ggml_gelu(ctx0, mlp_1);
+                lm_ggml_tensor * mlp_3 = lm_ggml_mul_mat(ctx0, model.mm_model_mlp_3_w, mlp_1);
+                mlp_3 = lm_ggml_add(ctx0, mlp_3, model.mm_model_mlp_3_b);
+                // mlp_3 shape = [1, 576, 2048], ne = [2048, 576, 1, 1]
+
+                // block 1
+                lm_ggml_tensor * block_1 = nullptr;
+                {
+                    // transpose from [1, 576, 2048] --> [1, 2048, 576] --> [1, 2048, 24, 24]
+                    mlp_3 = lm_ggml_permute(ctx0, mlp_3, 1, 0, 2, 3);
+                    mlp_3 = lm_ggml_cont_4d(ctx0, mlp_3, n_patch, n_patch, mlp_3->ne[1], mlp_3->ne[2]);
+                    // stride = 1, padding = 1, bias is nullptr
+                    block_1 = lm_ggml_conv_2d_dw(ctx0, model.mm_model_block_1_block_0_0_w, mlp_3, 1, 1, 1, 1, 1, 1);
+
+                    // layer norm
+                    // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
+                    block_1 = lm_ggml_cont(ctx0, lm_ggml_permute(ctx0, block_1, 1, 2, 0, 3));
+                    // block_1 shape = [1, 24, 24, 2048], ne = [2048, 24, 24, 1]
+                    block_1 = lm_ggml_norm(ctx0, block_1, eps);
+                    block_1 = lm_ggml_add(ctx0, lm_ggml_mul(ctx0, block_1, model.mm_model_block_1_block_0_1_w), model.mm_model_block_1_block_0_1_b);
+                    block_1 = lm_ggml_cont(ctx0, lm_ggml_permute(ctx0, block_1, 2, 0, 1, 3));
+
+                    // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
+                    // hardswish
+                    lm_ggml_tensor * block_1_hw = lm_ggml_hardswish(ctx0, block_1);
+
+                    block_1 = lm_ggml_pool_2d(ctx0, block_1_hw, LM_GGML_OP_POOL_AVG, block_1_hw->ne[0], block_1_hw->ne[1], block_1_hw->ne[0], block_1_hw->ne[1], 0, 0);
+                    // block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]
+                    // pointwise conv
+                    block_1 = lm_ggml_reshape_2d(ctx0, block_1, block_1->ne[0]*block_1->ne[1]*block_1->ne[2], block_1->ne[3]);
+                    block_1 = lm_ggml_mul_mat(ctx0, model.mm_model_block_1_block_1_fc1_w, block_1);
+                    block_1 = lm_ggml_add(ctx0, block_1, model.mm_model_block_1_block_1_fc1_b);
+                    block_1 = lm_ggml_relu(ctx0, block_1);
+                    block_1 = lm_ggml_mul_mat(ctx0, model.mm_model_block_1_block_1_fc2_w, block_1);
+                    block_1 = lm_ggml_add(ctx0, block_1, model.mm_model_block_1_block_1_fc2_b);
+                    block_1 = lm_ggml_hardsigmoid(ctx0, block_1);
+                    // block_1_hw shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1], block_1 shape = [1, 2048], ne = [2048, 1, 1, 1]
+                    block_1 = lm_ggml_reshape_4d(ctx0, block_1, 1, 1, block_1->ne[0], block_1->ne[1]);
+                    block_1 = lm_ggml_mul(ctx0, block_1_hw, block_1);
1354
+
1355
+ int w = block_1->ne[0], h = block_1->ne[1];
1356
+ block_1 = lm_ggml_reshape_3d(ctx0, block_1, w*h, block_1->ne[2], block_1->ne[3]);
1357
+ block_1 = lm_ggml_cont(ctx0, lm_ggml_permute(ctx0, block_1, 1, 0, 2, 3));
1358
+
1359
+ // block_1 shape = [1, 24*24, 2048], ne = [24*24, 2048, 1]
1360
+ block_1 = lm_ggml_mul_mat(ctx0, model.mm_model_block_1_block_2_0_w, block_1);
1361
+ block_1 = lm_ggml_reshape_4d(ctx0, block_1, block_1->ne[0], w, h, block_1->ne[3]);
1362
+
1363
+ // block_1 shape = [1, 24, 24, 2048], ne = [2048, 24, 24, 1]
1364
+ block_1 = lm_ggml_norm(ctx0, block_1, eps);
1365
+ block_1 = lm_ggml_add(ctx0, lm_ggml_mul(ctx0, block_1, model.mm_model_block_1_block_2_1_w), model.mm_model_block_1_block_2_1_b);
1366
+ block_1 = lm_ggml_cont(ctx0, lm_ggml_permute(ctx0, block_1, 2, 0, 1, 3));
1367
+ // block1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
1368
+ // residual
1369
+ block_1 = lm_ggml_add(ctx0, mlp_3, block_1);
1370
+ }
1371
+
1372
+ // block_2
1373
+ {
1374
+ // stride = 2
1375
+ block_1 = lm_ggml_conv_2d_dw(ctx0, model.mm_model_block_2_block_0_0_w, block_1, 2, 2, 1, 1, 1, 1);
1376
+
1377
+ // block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1]
1378
+ // layer norm
1379
+ block_1 = lm_ggml_cont(ctx0, lm_ggml_permute(ctx0, block_1, 1, 2, 0, 3));
1380
+ // block_1 shape = [1, 12, 12, 2048], ne = [2048, 12, 12, 1]
1381
+ block_1 = lm_ggml_norm(ctx0, block_1, eps);
1382
+ block_1 = lm_ggml_add(ctx0, lm_ggml_mul(ctx0, block_1, model.mm_model_block_2_block_0_1_w), model.mm_model_block_2_block_0_1_b);
1383
+ block_1 = lm_ggml_cont(ctx0, lm_ggml_permute(ctx0, block_1, 2, 0, 1, 3));
1384
+ // block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1]
1385
+ // hardswish
1386
+ lm_ggml_tensor * block_1_hw = lm_ggml_hardswish(ctx0, block_1);
1387
+
1388
+ // not sure the parameters is right for globalAvgPooling
1389
+ block_1 = lm_ggml_pool_2d(ctx0, block_1_hw, LM_GGML_OP_POOL_AVG, block_1_hw->ne[0], block_1_hw->ne[1], block_1_hw->ne[0], block_1_hw->ne[1], 0, 0);
1390
+ // block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]
1391
+ // pointwise conv
1392
+ block_1 = lm_ggml_reshape_2d(ctx0, block_1, block_1->ne[0]*block_1->ne[1]*block_1->ne[2], block_1->ne[3]);
1393
+ block_1 = lm_ggml_mul_mat(ctx0, model.mm_model_block_2_block_1_fc1_w, block_1);
1394
+ block_1 = lm_ggml_add(ctx0, block_1, model.mm_model_block_2_block_1_fc1_b);
1395
+ block_1 = lm_ggml_relu(ctx0, block_1);
1396
+ block_1 = lm_ggml_mul_mat(ctx0, model.mm_model_block_2_block_1_fc2_w, block_1);
1397
+ block_1 = lm_ggml_add(ctx0, block_1, model.mm_model_block_2_block_1_fc2_b);
1398
+ block_1 = lm_ggml_hardsigmoid(ctx0, block_1);
1399
+
1400
+ // block_1_hw shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1], block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]
1401
+ block_1 = lm_ggml_reshape_4d(ctx0, block_1, 1, 1, block_1->ne[0], block_1->ne[1]);
1402
+ block_1 = lm_ggml_mul(ctx0, block_1_hw, block_1);
1403
+
1404
+ int w = block_1->ne[0], h = block_1->ne[1];
1405
+ block_1 = lm_ggml_reshape_3d(ctx0, block_1, w*h, block_1->ne[2], block_1->ne[3]);
1406
+ block_1 = lm_ggml_cont(ctx0, lm_ggml_permute(ctx0, block_1, 1, 0, 2, 3));
1407
+ // block_1 shape = [1, 24*24, 2048], ne = [24*24, 2048, 1]
1408
+ block_1 = lm_ggml_mul_mat(ctx0, model.mm_model_block_2_block_2_0_w, block_1);
1409
+ block_1 = lm_ggml_reshape_4d(ctx0, block_1, block_1->ne[0], w, h, block_1->ne[3]);
1410
+
1411
+
1412
+ // block_1 shape = [1, 12, 12, 2048], ne = [2048, 12, 12, 1]
1413
+ block_1 = lm_ggml_norm(ctx0, block_1, eps);
1414
+ block_1 = lm_ggml_add(ctx0, lm_ggml_mul(ctx0, block_1, model.mm_model_block_2_block_2_1_w), model.mm_model_block_2_block_2_1_b);
1415
+ block_1 = lm_ggml_reshape_3d(ctx0, block_1, block_1->ne[0], block_1->ne[1] * block_1->ne[2], block_1->ne[3]);
1416
+ // block_1 shape = [1, 144, 2048], ne = [2048, 144, 1]
1417
+ }
1418
+ embeddings = block_1;
1419
+ }
1420
+ else if (ctx->proj_type() == PROJECTOR_TYPE_LDPV2)
1421
+ {
1422
+ int n_patch = 24;
1423
+ lm_ggml_tensor * mlp_0 = lm_ggml_mul_mat(ctx0, model.mm_model_mlp_0_w, embeddings);
1424
+ mlp_0 = lm_ggml_add(ctx0, mlp_0, model.mm_model_mlp_0_b);
1425
+ mlp_0 = lm_ggml_gelu(ctx0, mlp_0);
1426
+ lm_ggml_tensor * mlp_2 = lm_ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, mlp_0);
1427
+ mlp_2 = lm_ggml_add(ctx0, mlp_2, model.mm_model_mlp_2_b);
1428
+ // mlp_2 ne = [2048, 576, 1, 1]
1429
+ // // AVG Pool Layer 2*2, strides = 2
1430
+ mlp_2 = lm_ggml_permute(ctx0, mlp_2, 1, 0, 2, 3);
1431
+ // mlp_2 ne = [576, 2048, 1, 1]
1432
+ mlp_2 = lm_ggml_cont_4d(ctx0, mlp_2, n_patch, n_patch, mlp_2->ne[1], mlp_2->ne[2]);
1433
+ // mlp_2 ne [24, 24, 2048, 1]
1434
+ mlp_2 = lm_ggml_pool_2d(ctx0, mlp_2, LM_GGML_OP_POOL_AVG, 2, 2, 2, 2, 0, 0);
1435
+ // weight ne = [3, 3, 2048, 1]
1436
+ lm_ggml_tensor * peg_0 = lm_ggml_conv_2d_dw(ctx0, model.mm_model_peg_0_w, mlp_2, 1, 1, 1, 1, 1, 1);
1437
+ peg_0 = lm_ggml_cont(ctx0, lm_ggml_permute(ctx0, peg_0, 1, 2, 0, 3));
1438
+ peg_0 = lm_ggml_add(ctx0, peg_0, model.mm_model_peg_0_b);
1439
+ mlp_2 = lm_ggml_cont(ctx0, lm_ggml_permute(ctx0, mlp_2, 1, 2, 0, 3));
1440
+ peg_0 = lm_ggml_add(ctx0, peg_0, mlp_2);
1441
+ peg_0 = lm_ggml_reshape_3d(ctx0, peg_0, peg_0->ne[0], peg_0->ne[1] * peg_0->ne[2], peg_0->ne[3]);
1442
+ embeddings = peg_0;
1443
+ }
1444
+ else {
1445
+ LM_GGML_ABORT("fatal error");
1446
+ }
1447
+ }
1448
+
1449
+ // glm projector
1450
+ else if (ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE) {
1451
+ size_t gridsz = (size_t)sqrt(embeddings->ne[1]);
1452
+ embeddings = lm_ggml_permute(ctx0,embeddings,1,0,2,3);
1453
+ embeddings = lm_ggml_cont_3d(ctx0, embeddings, gridsz, gridsz, embeddings->ne[1]);
1454
+ embeddings = lm_ggml_conv_2d(ctx0, model.mm_model_adapter_conv_w, embeddings, 2, 2, 0, 0, 1, 1);
1455
+ embeddings = lm_ggml_reshape_3d(ctx0, embeddings,embeddings->ne[0]*embeddings->ne[1] , embeddings->ne[2], batch_size);
1456
+ embeddings = lm_ggml_cont(ctx0, lm_ggml_permute(ctx0,embeddings, 1, 0, 2, 3));
1457
+ embeddings = lm_ggml_add(ctx0, embeddings, model.mm_model_adapter_conv_b);
1458
+ // GLU
1459
+ {
1460
+ embeddings = lm_ggml_mul_mat(ctx0, model.mm_model_mlp_0_w, embeddings);
1461
+ embeddings = lm_ggml_norm(ctx0, embeddings, eps);
1462
+ embeddings = lm_ggml_add(ctx0, lm_ggml_mul(ctx0, embeddings, model.mm_model_ln_q_w), model.mm_model_ln_q_b);
1463
+ embeddings = lm_ggml_gelu_inplace(ctx0, embeddings);
1464
+ lm_ggml_tensor * x = embeddings;
1465
+ embeddings = lm_ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, embeddings);
1466
+ x = lm_ggml_mul_mat(ctx0, model.mm_model_mlp_1_w,x);
1467
+ embeddings = lm_ggml_swiglu_split(ctx0, embeddings, x);
1468
+ embeddings = lm_ggml_mul_mat(ctx0, model.mm_model_mlp_3_w, embeddings);
1469
+ }
1470
+ // arrangement of BOI/EOI token embeddings
1471
+ // note: these embeddings are not present in text model, hence we cannot process them as text tokens
1472
+ // see: https://huggingface.co/THUDM/glm-edge-v-2b/blob/main/siglip.py#L53
1473
+ {
1474
+ embeddings = lm_ggml_concat(ctx0, model.mm_glm_tok_boi, embeddings, 1); // BOI
1475
+ embeddings = lm_ggml_concat(ctx0, embeddings, model.mm_glm_tok_eoi, 1); // EOI
1476
+ }
1477
+ }
1478
+
1479
+ else {
1480
+ LM_GGML_ABORT("llava: unknown projector type");
1481
+ }
1482
+
1483
+ // build the graph
1484
+ lm_ggml_build_forward_expand(gf, embeddings);
1485
+
1486
+ return gf;
1487
+ }
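Stripped of graph plumbing, the PROJECTOR_TYPE_MLP branch above is just a two-layer perceptron applied per patch: y = W2 * gelu(W1 * x + b1) + b2. A scalar sketch for orientation (hypothetical dimensions, plain C++, not package code):

    #include <cmath>
    #include <vector>

    // y = W2 * gelu(W1 * x + b1) + b2, computed row by row; the real code expresses
    // the same thing as lm_ggml_mul_mat / lm_ggml_add / lm_ggml_gelu graph nodes.
    std::vector<float> mlp_project(const std::vector<std::vector<float>> & W1, const std::vector<float> & b1,
                                   const std::vector<std::vector<float>> & W2, const std::vector<float> & b2,
                                   const std::vector<float> & x) {
        std::vector<float> h(W1.size()), y(W2.size());
        for (size_t i = 0; i < W1.size(); i++) {
            float acc = b1[i];
            for (size_t j = 0; j < x.size(); j++) acc += W1[i][j] * x[j];
            h[i] = 0.5f * acc * (1.0f + std::erf(acc / std::sqrt(2.0f))); // exact GELU
        }
        for (size_t i = 0; i < W2.size(); i++) {
            float acc = b2[i];
            for (size_t j = 0; j < h.size(); j++) acc += W2[i][j] * h[j];
            y[i] = acc;
        }
        return y;
    }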
+
+ // whisper encoder with custom projector
+ lm_ggml_cgraph * build_whisper_enc() {
+ const int n_frames = img.nx;
+ const int n_pos = n_frames / 2;
+ LM_GGML_ASSERT(model.position_embeddings->ne[1] >= n_pos);
+
+ lm_ggml_tensor * inp = build_inp_raw(1);
+
+ // conv1d block
+ {
+ // convolution + gelu
+ lm_ggml_tensor * cur = lm_ggml_conv_1d_ph(ctx0, model.conv1d_1_w, inp, 1, 1);
+ cur = lm_ggml_add(ctx0, cur, model.conv1d_1_b);
+
+ cur = lm_ggml_gelu_erf(ctx0, cur);
+
+ cur = lm_ggml_conv_1d_ph(ctx0, model.conv1d_2_w, cur, 2, 1);
+ cur = lm_ggml_add(ctx0, cur, model.conv1d_2_b);
+
+ cur = lm_ggml_gelu_erf(ctx0, cur);
+ // transpose
+ inp = lm_ggml_cont(ctx0, lm_ggml_transpose(ctx0, cur));
+ cb(inp, "after_conv1d", -1);
+ }
+
+ // sanity check (only check one layer, but it should be the same for all)
+ LM_GGML_ASSERT(model.layers[0].ln_1_w && model.layers[0].ln_1_b);
+ LM_GGML_ASSERT(model.layers[0].ln_2_w && model.layers[0].ln_2_b);
+ LM_GGML_ASSERT(model.layers[0].q_b);
+ LM_GGML_ASSERT(model.layers[0].v_b);
+ LM_GGML_ASSERT(!model.layers[0].k_b); // no bias for k
+ LM_GGML_ASSERT(model.post_ln_w && model.post_ln_b);
+
+ lm_ggml_tensor * pos_embd_selected = lm_ggml_view_2d(
+ ctx0, model.position_embeddings,
+ model.position_embeddings->ne[0], n_pos,
+ model.position_embeddings->nb[1], 0
+ );
+ lm_ggml_tensor * cur = build_vit(
+ inp, n_pos,
+ NORM_TYPE_NORMAL,
+ hparams.ffn_op,
+ pos_embd_selected,
+ nullptr);
+
+ cb(cur, "after_transformer", -1);
+
+ if (model.audio_has_stack_frames()) {
+ // StackAudioFrames
+ // https://huggingface.co/fixie-ai/ultravox-v0_5-llama-3_2-1b/blob/main/ultravox_model.py
+ int64_t stride = n_embd * hparams.proj_stack_factor;
+ int64_t padded_len = LM_GGML_PAD(lm_ggml_nelements(cur), stride);
+ int64_t pad = padded_len - lm_ggml_nelements(cur);
+ if (pad > 0) {
+ cur = lm_ggml_view_1d(ctx0, cur, lm_ggml_nelements(cur), 0);
+ cur = lm_ggml_pad(ctx0, cur, pad, 0, 0, 0);
+ }
+ cur = lm_ggml_view_2d(ctx0, cur, stride, padded_len / stride,
+ lm_ggml_row_size(cur->type, stride), 0);
+ cb(cur, "after_stacked", -1);
+ }
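To make the StackAudioFrames arithmetic above concrete: with hypothetical Whisper-style sizes (n_embd = 1280, proj_stack_factor = 8, 1500 output frames), the flattened tensor is padded up to a multiple of n_embd * 8 and then re-viewed as wider rows, so each row carries 8 consecutive frames:

    #include <cstdint>
    #include <cstdio>

    // Worked example of the padding above; LM_GGML_PAD(x, n) rounds x up to a multiple of n.
    int main() {
        const int64_t n_embd = 1280, stack_factor = 8, n_frames = 1500; // hypothetical sizes
        const int64_t stride     = n_embd * stack_factor;                      // 10240
        const int64_t n_elems    = n_embd * n_frames;                          // 1920000
        const int64_t padded_len = ((n_elems + stride - 1) / stride) * stride; // 1925120
        printf("pad = %lld elements, stacked rows = %lld\n",
               (long long) (padded_len - n_elems), (long long) (padded_len / stride)); // 5120, 188
        return 0;
    }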
+
+ if (ctx->proj_type() == PROJECTOR_TYPE_ULTRAVOX) {
+ // UltravoxProjector
+ // pre-norm
+ cur = lm_ggml_rms_norm(ctx0, cur, 1e-6);
+ cur = lm_ggml_mul(ctx0, cur, model.mm_norm_pre_w);
+
+ // ffn in
+ cur = lm_ggml_mul_mat(ctx0, model.mm_1_w, cur);
+
+ // swiglu
+ // see SwiGLU in ultravox_model.py: silu is applied to the second half passed through, not the first
+ cur = lm_ggml_swiglu_swapped(ctx0, cur);
+
+ // mid-norm
+ cur = lm_ggml_rms_norm(ctx0, cur, 1e-6);
+ cur = lm_ggml_mul(ctx0, cur, model.mm_norm_mid_w);
+
+ // ffn out
+ cur = lm_ggml_mul_mat(ctx0, model.mm_2_w, cur);
+
+ } else if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2A) {
+ // projector
+ cur = lm_ggml_mul_mat(ctx0, model.mm_fc_w, cur);
+ cur = lm_ggml_add(ctx0, cur, model.mm_fc_b);
+
+ } else if (ctx->proj_type() == PROJECTOR_TYPE_VOXTRAL) {
+ // projector
+ cur = lm_ggml_mul_mat(ctx0, model.mm_1_w, cur);
+ cur = lm_ggml_gelu_erf(ctx0, cur);
+ cur = lm_ggml_mul_mat(ctx0, model.mm_2_w, cur);
+
+ } else {
+ LM_GGML_ABORT("%s: unknown projector type", __func__);
+ }
+
+ cb(cur, "projected", -1);
+
+ lm_ggml_build_forward_expand(gf, cur);
+
+ return gf;
+ }
+
+ private:
+ //
+ // utility functions
+ //
+
+ void cb(lm_ggml_tensor * cur0, const char * name, int il) const {
+ if (ctx->debug_graph) {
+ lm_ggml_tensor * cur = lm_ggml_cpy(ctx0, cur0, lm_ggml_dup_tensor(ctx0, cur0));
+ std::string cur_name = il >= 0 ? std::string(name) + "_" + std::to_string(il) : name;
+ lm_ggml_set_name(cur, cur_name.c_str());
+ lm_ggml_set_output(cur);
+ lm_ggml_build_forward_expand(gf, cur);
+ ctx->debug_print_tensors.push_back(cur);
+ }
+ }
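cb() is a no-op unless ctx->debug_graph is set; when it is, each intermediate tensor gets a copy with a stable name of the form name_layer, so it can be located and printed after graph execution. The naming logic in isolation:

    #include <cstdio>
    #include <string>

    // Mirrors the name construction inside cb() above.
    static std::string debug_name(const char * name, int il) {
        return il >= 0 ? std::string(name) + "_" + std::to_string(il) : name;
    }

    int main() {
        printf("%s\n", debug_name("attn_out", 3).c_str());   // -> attn_out_3
        printf("%s\n", debug_name("pos_embed", -1).c_str()); // -> pos_embed (no layer suffix)
        return 0;
    }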
+
+ // siglip2 naflex
+ lm_ggml_tensor * resize_position_embeddings() {
+ lm_ggml_tensor * pos_embd = model.position_embeddings;
+ const int height = img.ny / patch_size;
+ const int width = img.nx / patch_size;
+
+ if (!pos_embd || height * width == pos_embd->ne[1]) {
+ return pos_embd;
+ }
+
+ const int n_pos_embd = std::sqrt(pos_embd->ne[1]);
+ pos_embd = lm_ggml_reshape_3d(ctx0, pos_embd, n_embd, n_pos_embd, n_pos_embd); // -> (n_embd, n_pos_embd, n_pos_embd)
+ pos_embd = lm_ggml_permute(ctx0, pos_embd, 2, 0, 1, 3);                        // -> (n_pos_embd, n_pos_embd, n_embd)
+ pos_embd = lm_ggml_interpolate(ctx0, pos_embd, width, height, n_embd, 1, 1);   // -> (width, height, n_embd)
+ pos_embd = lm_ggml_reshape_2d(ctx0, pos_embd, height * width, n_embd);         // -> (height * width, n_embd)
+ pos_embd = lm_ggml_transpose(ctx0, pos_embd);                                  // -> (n_embd, height * width)
+ pos_embd = lm_ggml_cont(ctx0, pos_embd);
+
+ return pos_embd;
+ }
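The resize above assumes the pretrained table is a square grid and rescales it to the actual patch grid. With hypothetical SigLIP-style numbers (729 trained positions, i.e. a 27x27 grid, resized for a 768x512 image at patch size 32) the shapes move as follows:

    #include <cmath>
    #include <cstdio>

    // Shape walk-through of resize_position_embeddings above (all sizes hypothetical).
    int main() {
        const int n_embd = 1152, n_pos_trained = 729, patch_size = 32;
        const int n_pos_embd = (int) std::sqrt((double) n_pos_trained); // 27
        const int width  = 768 / patch_size; // 24
        const int height = 512 / patch_size; // 16
        printf("[%d, %d] -> [%d, %d, %d] -> interpolate -> [%d, %d, %d] -> [%d, %d]\n",
               n_embd, n_pos_trained,
               n_embd, n_pos_embd, n_pos_embd,
               width, height, n_embd,
               n_embd, height * width);
        return 0;
    }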
+
+ // build vision transformer (ViT) cgraph
+ // this function should cover most of the models
+ // if your model has specific features, you should probably duplicate this function
+ lm_ggml_tensor * build_vit(
+ lm_ggml_tensor * inp,
+ int64_t n_pos,
+ norm_type norm_t,
+ ffn_op_type ffn_t,
+ lm_ggml_tensor * learned_pos_embd,
+ std::function<lm_ggml_tensor *(lm_ggml_tensor *, const clip_layer &)> add_pos
+ ) {
+ if (learned_pos_embd) {
+ inp = lm_ggml_add(ctx0, inp, learned_pos_embd);
+ cb(inp, "pos_embed", -1);
+ }
+
+ lm_ggml_tensor * inpL = inp;
+
+ // pre-layernorm
+ if (model.pre_ln_w) {
+ inpL = build_norm(inpL, model.pre_ln_w, model.pre_ln_b, norm_t, eps, -1);
+ cb(inpL, "pre_ln", -1);
+ }
+
+ // loop over layers
+ for (int il = 0; il < n_layer; il++) {
+ auto & layer = model.layers[il];
+ lm_ggml_tensor * cur = inpL; // inpL = residual, cur = hidden_states
+
+ // layernorm1
+ cur = build_norm(cur, layer.ln_1_w, layer.ln_1_b, norm_t, eps, il);
+ cb(cur, "layer_inp_normed", il);
+
+ // self-attention
+ {
+ lm_ggml_tensor * Qcur = lm_ggml_mul_mat(ctx0, layer.q_w, cur);
+ if (layer.q_b) {
+ Qcur = lm_ggml_add(ctx0, Qcur, layer.q_b);
+ }
+
+ lm_ggml_tensor * Kcur = lm_ggml_mul_mat(ctx0, layer.k_w, cur);
+ if (layer.k_b) {
+ Kcur = lm_ggml_add(ctx0, Kcur, layer.k_b);
+ }
+
+ lm_ggml_tensor * Vcur = lm_ggml_mul_mat(ctx0, layer.v_w, cur);
+ if (layer.v_b) {
+ Vcur = lm_ggml_add(ctx0, Vcur, layer.v_b);
+ }
+
+ if (layer.q_norm) {
+ Qcur = build_norm(Qcur, layer.q_norm, NULL, norm_t, eps, il);
+ cb(Qcur, "Qcur_norm", il);
+ }
+
+ if (layer.k_norm) {
+ Kcur = build_norm(Kcur, layer.k_norm, NULL, norm_t, eps, il);
+ cb(Kcur, "Kcur_norm", il);
+ }
+
+ Qcur = lm_ggml_reshape_3d(ctx0, Qcur, d_head, n_head, n_pos);
+ Kcur = lm_ggml_reshape_3d(ctx0, Kcur, d_head, n_head, n_pos);
+ Vcur = lm_ggml_reshape_3d(ctx0, Vcur, d_head, n_head, n_pos);
+
+ cb(Qcur, "Qcur", il);
+ cb(Kcur, "Kcur", il);
+ cb(Vcur, "Vcur", il);
+
+ if (add_pos) {
+ Qcur = add_pos(Qcur, layer);
+ Kcur = add_pos(Kcur, layer);
+ cb(Qcur, "Qcur_pos", il);
+ cb(Kcur, "Kcur_pos", il);
+ }
+
+ cur = build_attn(layer.o_w, layer.o_b,
+ Qcur, Kcur, Vcur, nullptr, kq_scale, il);
+ cb(cur, "attn_out", il);
+ }
+
+ if (layer.ls_1_w) {
+ cur = lm_ggml_mul(ctx0, cur, layer.ls_1_w);
+ cb(cur, "attn_out_scaled", il);
+ }
+
+ // re-add the layer input, i.e. the residual connection
+ cur = lm_ggml_add(ctx0, cur, inpL);
+
+ inpL = cur; // inpL = residual, cur = hidden_states
+
+ cb(cur, "ffn_inp", il);
+
+ // layernorm2
+ cur = build_norm(cur, layer.ln_2_w, layer.ln_2_b, norm_t, eps, il);
+ cb(cur, "ffn_inp_normed", il);
+
+ // ffn
+ cur = build_ffn(cur,
+ layer.ff_up_w, layer.ff_up_b,
+ layer.ff_gate_w, layer.ff_gate_b,
+ layer.ff_down_w, layer.ff_down_b,
+ ffn_t, il);
+
+ cb(cur, "ffn_out", il);
+
+ if (layer.ls_2_w) {
+ cur = lm_ggml_mul(ctx0, cur, layer.ls_2_w);
+ cb(cur, "ffn_out_scaled", il);
+ }
+
+ // residual 2
+ cur = lm_ggml_add(ctx0, inpL, cur);
+ cb(cur, "layer_out", il);
+
+ inpL = cur;
+ }
+
+ if (ctx->model.audio_has_avgpool()) {
+ lm_ggml_tensor * cur = inpL;
+ cur = lm_ggml_transpose(ctx0, cur);
+ cur = lm_ggml_cont(ctx0, cur);
+ cur = lm_ggml_pool_1d(ctx0, cur, LM_GGML_OP_POOL_AVG, 2, 2, 0);
+ cur = lm_ggml_transpose(ctx0, cur);
+ cur = lm_ggml_cont(ctx0, cur);
+ inpL = cur;
+ }
+
+ // post-layernorm
+ if (model.post_ln_w) {
+ inpL = build_norm(inpL, model.post_ln_w, model.post_ln_b, norm_t, eps, -1);
+ }
+ return inpL;
+ }
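For reference, build_whisper_enc earlier in this diff drives build_vit like this; the add_pos callback (nullptr there) is the hook that RoPE-based graphs can use to rotate Q/K per layer instead of adding a learned table once up front:

    // call site as it appears in build_whisper_enc above:
    // lm_ggml_tensor * cur = build_vit(
    //     inp, n_pos,
    //     NORM_TYPE_NORMAL,   // classic LayerNorm rather than RMS norm
    //     hparams.ffn_op,     // activation selected in load_hparams
    //     pos_embd_selected,  // learned absolute positions, added before layer 0
    //     nullptr);           // no per-layer position callback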
+
+ // build the input after conv2d (inp_raw --> patches)
+ // returns tensor with shape [n_embd, n_patches]
+ lm_ggml_tensor * build_inp() {
+ lm_ggml_tensor * inp_raw = build_inp_raw();
+ lm_ggml_tensor * inp = lm_ggml_conv_2d(ctx0, model.patch_embeddings_0, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
+ inp = lm_ggml_reshape_2d(ctx0, inp, n_patches, n_embd);
+ inp = lm_ggml_cont(ctx0, lm_ggml_transpose(ctx0, inp));
+ if (model.patch_bias) {
+ inp = lm_ggml_add(ctx0, inp, model.patch_bias);
+ cb(inp, "patch_bias", -1);
+ }
+ return inp;
+ }
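Patch-count arithmetic for build_inp above, with hypothetical CLIP-L numbers (336px image, 14px patches, 1024-dim embeddings): the stride-14 convolution produces a 24x24 map that is flattened and transposed into [n_embd, n_patches]:

    #include <cstdio>

    int main() {
        const int image_size = 336, patch_size = 14, n_embd = 1024; // hypothetical
        const int side      = image_size / patch_size; // 24
        const int n_patches = side * side;             // 576
        // conv2d: ne = [24, 24, 1024] -> reshape_2d -> [576, 1024] -> transpose+cont -> [1024, 576]
        printf("n_patches = %d, final ne = [%d, %d]\n", n_patches, n_embd, n_patches);
        return 0;
    }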
+
+ lm_ggml_tensor * build_inp_raw(int channels = 3) {
+ lm_ggml_tensor * inp_raw = lm_ggml_new_tensor_3d(ctx0, LM_GGML_TYPE_F32, img.nx, img.ny, channels);
+ lm_ggml_set_name(inp_raw, "inp_raw");
+ lm_ggml_set_input(inp_raw);
+ return inp_raw;
+ }
+
+ lm_ggml_tensor * build_norm(
+ lm_ggml_tensor * cur,
+ lm_ggml_tensor * mw,
+ lm_ggml_tensor * mb,
+ norm_type type,
+ float norm_eps,
+ int il) const {
+
+ cur = type == NORM_TYPE_RMS
+ ? lm_ggml_rms_norm(ctx0, cur, norm_eps)
+ : lm_ggml_norm(ctx0, cur, norm_eps);
+
+ if (mw || mb) {
+ cb(cur, "norm", il);
+ }
+
+ if (mw) {
+ cur = lm_ggml_mul(ctx0, cur, mw);
+ if (mb) {
+ cb(cur, "norm_w", il);
+ }
+ }
+
+ if (mb) {
+ cur = lm_ggml_add(ctx0, cur, mb);
+ }
+
+ return cur;
+ }
+
+ lm_ggml_tensor * build_ffn(
+ lm_ggml_tensor * cur,
+ lm_ggml_tensor * up,
+ lm_ggml_tensor * up_b,
+ lm_ggml_tensor * gate,
+ lm_ggml_tensor * gate_b,
+ lm_ggml_tensor * down,
+ lm_ggml_tensor * down_b,
+ ffn_op_type type_op,
+ int il) const {
+
+ lm_ggml_tensor * tmp = up ? lm_ggml_mul_mat(ctx0, up, cur) : cur;
+ cb(tmp, "ffn_up", il);
+
+ if (up_b) {
+ tmp = lm_ggml_add(ctx0, tmp, up_b);
+ cb(tmp, "ffn_up_b", il);
+ }
+
+ if (gate) {
+ cur = lm_ggml_mul_mat(ctx0, gate, cur);
+ cb(cur, "ffn_gate", il);
+
+ if (gate_b) {
+ cur = lm_ggml_add(ctx0, cur, gate_b);
+ cb(cur, "ffn_gate_b", il);
+ }
+ } else {
+ cur = tmp;
+ }
+
+ // we only support parallel ffn for now
+ switch (type_op) {
+ case FFN_SILU:
+ if (gate) {
+ cur = lm_ggml_swiglu_split(ctx0, cur, tmp);
+ cb(cur, "ffn_swiglu", il);
+ } else {
+ cur = lm_ggml_silu(ctx0, cur);
+ cb(cur, "ffn_silu", il);
+ } break;
+ case FFN_GELU:
+ if (gate) {
+ cur = lm_ggml_geglu_split(ctx0, cur, tmp);
+ cb(cur, "ffn_geglu", il);
+ } else {
+ cur = lm_ggml_gelu(ctx0, cur);
+ cb(cur, "ffn_gelu", il);
+ } break;
+ case FFN_GELU_ERF:
+ if (gate) {
+ cur = lm_ggml_geglu_erf_split(ctx0, cur, tmp);
+ cb(cur, "ffn_geglu_erf", il);
+ } else {
+ cur = lm_ggml_gelu_erf(ctx0, cur);
+ cb(cur, "ffn_gelu_erf", il);
+ } break;
+ case FFN_GELU_QUICK:
+ if (gate) {
+ cur = lm_ggml_geglu_quick_split(ctx0, cur, tmp);
+ cb(cur, "ffn_geglu_quick", il);
+ } else {
+ cur = lm_ggml_gelu_quick(ctx0, cur);
+ cb(cur, "ffn_gelu_quick", il);
+ } break;
+ }
+
+ if (down) {
+ cur = lm_ggml_mul_mat(ctx0, down, cur);
+ }
+
+ if (down_b) {
+ cb(cur, "ffn_down", il);
+ cur = lm_ggml_add(ctx0, cur, down_b);
+ }
+
+ return cur;
+ }
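In the FFN_SILU case above, the gated path computes a SwiGLU. Assuming lm_ggml_swiglu_split(ctx, a, b) evaluates silu(a) * b elementwise (a = gate projection, b = up projection; this reading of the op is my assumption, not stated in the diff), the scalar equivalent is:

    #include <cmath>

    // Scalar view of the two FFN_SILU paths in build_ffn above (swiglu_split
    // semantics are an assumption, not taken from this diff).
    static float silu(float v)                        { return v / (1.0f + std::exp(-v)); }
    static float ffn_silu_gated(float up, float gate) { return silu(gate) * up; } // gate present
    static float ffn_silu_plain(float up)             { return silu(up); }        // no gate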
+
+ lm_ggml_tensor * build_attn(
+ lm_ggml_tensor * wo,
+ lm_ggml_tensor * wo_b,
+ lm_ggml_tensor * q_cur,
+ lm_ggml_tensor * k_cur,
+ lm_ggml_tensor * v_cur,
+ lm_ggml_tensor * kq_mask,
+ float kq_scale,
+ int il) const {
+ // these nodes are added to the graph together so that they are not reordered
+ // by doing so, the number of splits in the graph is reduced
+ lm_ggml_build_forward_expand(gf, q_cur);
+ lm_ggml_build_forward_expand(gf, k_cur);
+ lm_ggml_build_forward_expand(gf, v_cur);
+
+ lm_ggml_tensor * q = lm_ggml_permute(ctx0, q_cur, 0, 2, 1, 3);
+ //cb(q, "q", il);
+
+ lm_ggml_tensor * k = lm_ggml_permute(ctx0, k_cur, 0, 2, 1, 3);
+ //cb(k, "k", il);
+
+ lm_ggml_tensor * v = lm_ggml_permute(ctx0, v_cur, 1, 2, 0, 3);
+ v = lm_ggml_cont(ctx0, v);
+ //cb(v, "v", il);
+
+ lm_ggml_tensor * cur;
+
+ // TODO @ngxson : support flash attention
+ {
+ const auto n_tokens = q->ne[1];
+ const auto n_head = q->ne[2];
+ // const auto n_kv = k->ne[1]; // for flash attention
+
+ lm_ggml_tensor * kq = lm_ggml_mul_mat(ctx0, k, q);
+ // F32 may not be needed for vision encoders?
+ // lm_ggml_mul_mat_set_prec(kq, LM_GGML_PREC_F32);
+
+ kq = lm_ggml_soft_max_ext(ctx0, kq, kq_mask, kq_scale, 0.0f);
+
+ lm_ggml_tensor * kqv = lm_ggml_mul_mat(ctx0, v, kq);
+ cur = lm_ggml_permute(ctx0, kqv, 0, 2, 1, 3);
+ cur = lm_ggml_cont_2d(ctx0, cur, cur->ne[0]*n_head, n_tokens);
+ }
+
+ cb(cur, "kqv_out", il);
+
+ if (wo) {
+ cur = lm_ggml_mul_mat(ctx0, wo, cur);
+ }
+
+ if (wo_b) {
+ cur = lm_ggml_add(ctx0, cur, wo_b);
+ }
+
+ return cur;
+ }
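A shape walk-through of build_attn above, using hypothetical ViT-L numbers (16 heads, d_head 64, 576 tokens); recall that ne lists the fastest-varying dimension first:

    #include <cstdio>

    int main() {
        const int d_head = 64, n_head = 16, n_pos = 576; // hypothetical
        // Q, K: permute(0,2,1,3) of [d_head, n_head, n_pos] -> [d_head, n_pos, n_head]
        // V:    permute(1,2,0,3) then cont                  -> [n_pos, d_head, n_head]
        // kq  = mul_mat(K, Q)  -> [n_pos, n_pos, n_head], then scaled softmax
        // kqv = mul_mat(V, kq) -> [d_head, n_pos, n_head] -> permute + cont_2d -> [d_head*n_head, n_pos]
        printf("attn output ne = [%d, %d]\n", d_head * n_head, n_pos);
        return 0;
    }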
+
+ // implementation of the 2D RoPE without adding a new op in ggml
+ // this is not efficient (it uses double the memory), but it works on all backends
+ // TODO: there was a more efficient implementation which relied on lm_ggml_view and lm_ggml_rope_ext_inplace, but rope inplace does not work well with non-contiguous tensors; we should fix that and revert to the original implementation in https://github.com/ggml-org/llama.cpp/pull/13065
+ static lm_ggml_tensor * build_rope_2d(
+ lm_ggml_context * ctx0,
+ lm_ggml_tensor * cur,
+ lm_ggml_tensor * pos_a, // first half
+ lm_ggml_tensor * pos_b, // second half
+ const float freq_base,
+ const bool interleave_freq
+ ) {
+ const int64_t n_dim = cur->ne[0];
+ const int64_t n_head = cur->ne[1];
+ const int64_t n_pos = cur->ne[2];
+
+ // for example, if we have cur tensor of shape (n_dim=8, n_head, n_pos)
+ // we will have a list of 4 inv_freq: 1e-0, 1e-1, 1e-2, 1e-3
+ // first half of cur will use 1e-0, 1e-2 (even)
+ // second half of cur will use 1e-1, 1e-3 (odd)
+ // the trick here is to rotate just half of n_dim, so inv_freq will automatically be even
+ // ^ don't ask me why, it's math! -2(2i) / n_dim == -2i / (n_dim/2)
+ // then for the second half, we use freq_scale to shift the inv_freq
+ // ^ why? replace (2i) with (2i+1) in the above equation
+ const float freq_scale_odd = interleave_freq
+ ? std::pow(freq_base, (float)-2/n_dim)
+ : 1.0;
+
+ // first half
+ lm_ggml_tensor * first;
+ {
+ first = lm_ggml_view_3d(ctx0, cur,
+ n_dim/2, n_head, n_pos,
+ lm_ggml_row_size(cur->type, n_dim),
+ lm_ggml_row_size(cur->type, n_dim*n_head),
+ 0);
+ first = lm_ggml_rope_ext(
+ ctx0,
+ first,
+ pos_a, // positions
+ nullptr, // freq factors
+ n_dim/2, // n_dims
+ 0, 0, freq_base,
+ 1.0f, 0.0f, 1.0f, 0.0f, 0.0f
+ );
+ }
+
+ // second half
+ lm_ggml_tensor * second;
+ {
+ second = lm_ggml_view_3d(ctx0, cur,
+ n_dim/2, n_head, n_pos,
+ lm_ggml_row_size(cur->type, n_dim),
+ lm_ggml_row_size(cur->type, n_dim*n_head),
+ n_dim/2 * lm_ggml_element_size(cur));
+ second = lm_ggml_rope_ext(
+ ctx0,
+ second,
+ pos_b, // positions
+ nullptr, // freq factors
+ n_dim/2, // n_dims
+ 0, 0, freq_base,
+ freq_scale_odd,
+ 0.0f, 1.0f, 0.0f, 0.0f
+ );
+ }
+
+ cur = lm_ggml_concat(ctx0, first, second, 0);
+ return cur;
+ }
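The identity the comment in build_rope_2d gestures at is base^(-2(2i)/n_dim) = base^(-2i/(n_dim/2)): rotating only n_dim/2 dimensions reproduces exactly the even-indexed inverse frequencies, and one extra factor of base^(-2/n_dim) shifts them to the odd-indexed ones. A quick numeric check (hypothetical n_dim = 8):

    #include <cmath>
    #include <cstdio>

    int main() {
        const int   n_dim = 8;          // hypothetical head dimension
        const float base  = 10000.0f;
        const float scale_odd = std::pow(base, -2.0f / n_dim); // freq_scale_odd above
        for (int i = 0; i < n_dim / 2; i++) {
            const float half = std::pow(base, -2.0f * i / (n_dim / 2));       // rope on n_dim/2 dims
            const float even = std::pow(base, -2.0f * (2 * i) / n_dim);       // inv_freq of dim 2i
            const float odd  = std::pow(base, -2.0f * (2 * i + 1) / n_dim);   // inv_freq of dim 2i+1
            printf("i=%d: half=%.6g == even=%.6g, half*scale=%.6g == odd=%.6g\n",
                   i, half, even, half * scale_odd, odd);
        }
        return 0;
    }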
+
+ };
+
+ static lm_ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32_batch & imgs) {
+ LM_GGML_ASSERT(imgs.entries.size() == 1 && "n_batch > 1 is not supported");
+ clip_graph graph(ctx, *imgs.entries[0]);
+
+ lm_ggml_cgraph * res;
+
+ switch (ctx->proj_type()) {
+ case PROJECTOR_TYPE_GEMMA3:
+ case PROJECTOR_TYPE_IDEFICS3:
+ case PROJECTOR_TYPE_LFM2:
+ {
+ res = graph.build_siglip();
+ } break;
+ case PROJECTOR_TYPE_PIXTRAL:
+ {
+ res = graph.build_pixtral();
+ } break;
+ case PROJECTOR_TYPE_QWEN2VL:
+ case PROJECTOR_TYPE_QWEN25VL:
+ {
+ res = graph.build_qwen2vl();
+ } break;
+ case PROJECTOR_TYPE_MINICPMV:
+ {
+ res = graph.build_minicpmv();
+ } break;
+ case PROJECTOR_TYPE_INTERNVL:
+ {
+ res = graph.build_internvl();
+ } break;
+ case PROJECTOR_TYPE_LLAMA4:
+ {
+ res = graph.build_llama4();
+ } break;
+ case PROJECTOR_TYPE_ULTRAVOX:
+ case PROJECTOR_TYPE_VOXTRAL:
+ case PROJECTOR_TYPE_QWEN2A:
+ {
+ res = graph.build_whisper_enc();
+ } break;
+ default:
+ {
+ res = graph.build_llava();
+ } break;
+ }
+ return res;
+ }
+
+ struct clip_model_loader {
+ lm_ggml_context_ptr ctx_meta;
+ lm_gguf_context_ptr ctx_gguf;
+
+ std::string fname;
+
+ size_t model_size = 0; // in bytes
+
+ bool has_vision = false;
+ bool has_audio = false;
+
+ // TODO @ngxson : we should not pass clip_ctx here, it should be clip_model
+ clip_model_loader(const char * fname) : fname(fname) {
+ struct lm_ggml_context * meta = nullptr;
+
+ struct lm_gguf_init_params params = {
+ /*.no_alloc = */ true,
+ /*.ctx = */ &meta,
+ };
+
+ ctx_gguf = lm_gguf_context_ptr(lm_gguf_init_from_file(fname, params));
+ if (!ctx_gguf.get()) {
+ throw std::runtime_error(string_format("%s: failed to load CLIP model from %s. Does this file exist?\n", __func__, fname));
+ }
+
+ ctx_meta.reset(meta);
+
+ const int n_tensors = lm_gguf_get_n_tensors(ctx_gguf.get());
+
+ // print gguf info
+ {
+ std::string name;
+ get_string(KEY_NAME, name, false);
+ std::string description;
+ get_string(KEY_DESCRIPTION, description, false);
+ LOG_INF("%s: model name: %s\n", __func__, name.c_str());
+ LOG_INF("%s: description: %s\n", __func__, description.c_str());
+ LOG_INF("%s: GGUF version: %d\n", __func__, lm_gguf_get_version(ctx_gguf.get()));
+ LOG_INF("%s: alignment: %zu\n", __func__, lm_gguf_get_alignment(ctx_gguf.get()));
+ LOG_INF("%s: n_tensors: %d\n", __func__, n_tensors);
+ LOG_INF("%s: n_kv: %d\n", __func__, (int)lm_gguf_get_n_kv(ctx_gguf.get()));
+ LOG_INF("\n");
+ }
+
+ // modalities
+ {
+ get_bool(KEY_HAS_VISION_ENC, has_vision, false);
+ get_bool(KEY_HAS_AUDIO_ENC, has_audio, false);
+
+ if (has_vision) {
+ LOG_INF("%s: has vision encoder\n", __func__);
+ }
+ if (has_audio) {
+ LOG_INF("%s: has audio encoder\n", __func__);
+ }
+ }
+
+ // tensors
+ {
+ for (int i = 0; i < n_tensors; ++i) {
+ const char * name = lm_gguf_get_tensor_name(ctx_gguf.get(), i);
+ const size_t offset = lm_gguf_get_tensor_offset(ctx_gguf.get(), i);
+ enum lm_ggml_type type = lm_gguf_get_tensor_type(ctx_gguf.get(), i);
+ lm_ggml_tensor * cur = lm_ggml_get_tensor(meta, name);
+ size_t tensor_size = lm_ggml_nbytes(cur);
+ model_size += tensor_size;
+ LOG_DBG("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, offset=%zu, shape:[%" PRIu64 ", %" PRIu64 ", %" PRIu64 ", %" PRIu64 "], type = %s\n",
+ __func__, i, lm_ggml_n_dims(cur), cur->name, tensor_size, offset, cur->ne[0], cur->ne[1], cur->ne[2], cur->ne[3], lm_ggml_type_name(type));
+ }
+ }
+ }
+
+ void load_hparams(clip_model & model, clip_modality modality) {
+ auto & hparams = model.hparams;
+ std::string log_ffn_op; // for logging
+
+ // sanity check
+ if (modality == CLIP_MODALITY_VISION) {
+ LM_GGML_ASSERT(has_vision);
+ } else if (modality == CLIP_MODALITY_AUDIO) {
+ LM_GGML_ASSERT(has_audio);
+ }
+ model.modality = modality;
+
+ // projector type
+ std::string proj_type;
+ {
+ get_string(KEY_PROJ_TYPE, proj_type, false);
+ if (!proj_type.empty()) {
+ model.proj_type = clip_projector_type_from_string(proj_type);
+ }
+ if (model.proj_type == PROJECTOR_TYPE_UNKNOWN) {
+ throw std::runtime_error(string_format("%s: unknown projector type: %s\n", __func__, proj_type.c_str()));
+ }
+
+ // correct arch for multimodal models
+ if (model.proj_type == PROJECTOR_TYPE_QWEN25O) {
+ model.proj_type = modality == CLIP_MODALITY_VISION
+ ? PROJECTOR_TYPE_QWEN25VL
+ : PROJECTOR_TYPE_QWEN2A;
+ }
+ }
+
+ const bool is_vision = model.modality == CLIP_MODALITY_VISION;
+ const bool is_audio = model.modality == CLIP_MODALITY_AUDIO;
+
+ // other hparams
+ {
+ const char * prefix = is_vision ? "vision" : "audio";
+ get_u32(string_format(KEY_N_EMBD, prefix), hparams.n_embd);
+ get_u32(string_format(KEY_N_HEAD, prefix), hparams.n_head);
+ get_u32(string_format(KEY_N_FF, prefix), hparams.n_ff);
+ get_u32(string_format(KEY_N_BLOCK, prefix), hparams.n_layer);
+ get_u32(string_format(KEY_PROJ_DIM, prefix), hparams.projection_dim);
+ get_f32(string_format(KEY_LAYER_NORM_EPS, prefix), hparams.eps);
+
+ if (is_vision) {
+ get_u32(KEY_IMAGE_SIZE, hparams.image_size);
+ get_u32(KEY_PATCH_SIZE, hparams.patch_size);
+ get_u32(KEY_IMAGE_CROP_RESOLUTION, hparams.image_crop_resolution, false);
+ get_i32(KEY_MINICPMV_VERSION, hparams.minicpmv_version, false); // legacy
+ get_u32(KEY_MINICPMV_QUERY_NUM, hparams.minicpmv_query_num, false);
+ if (hparams.minicpmv_query_num == 0) {
+ // fallback to hardcoded values for legacy models: versions 3-5 use 64 queries, other versions 96
+ if (hparams.minicpmv_version >= 3 && hparams.minicpmv_version <= 5) {
+ hparams.minicpmv_query_num = 64;
+ } else {
+ hparams.minicpmv_query_num = 96;
+ }
+ }
+ } else if (is_audio) {
+ get_u32(KEY_A_NUM_MEL_BINS, hparams.n_mel_bins);
+
+ } else {
+ LM_GGML_ASSERT(false && "unknown modality");
+ }
+
+ // for pinpoints, we need to convert them into a list of resolution candidates
+ {
+ std::vector<int> pinpoints;
+ get_arr_int(KEY_IMAGE_GRID_PINPOINTS, pinpoints, false);
+ if (!pinpoints.empty()) {
+ for (size_t i = 0; i < pinpoints.size(); i += 2) {
+ hparams.image_res_candidates.push_back({
+ pinpoints[i],
+ pinpoints[i+1],
+ });
+ }
+ }
+ }
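The pinpoints flattening above in isolation: the GGUF key stores a flat [w0, h0, w1, h1, ...] list which becomes (width, height) candidates. A standalone sketch with hypothetical values:

    #include <cstdio>
    #include <vector>

    int main() {
        const std::vector<int> pinpoints = { 336, 672, 672, 336, 672, 672 }; // hypothetical
        for (size_t i = 0; i + 1 < pinpoints.size(); i += 2) {
            printf("candidate resolution: %d x %d\n", pinpoints[i], pinpoints[i + 1]);
        }
        return 0;
    }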
+
+ // default warmup value
+ hparams.warmup_image_size = hparams.image_size;
+
+ hparams.has_llava_projector = model.proj_type == PROJECTOR_TYPE_MLP
+ || model.proj_type == PROJECTOR_TYPE_MLP_NORM
+ || model.proj_type == PROJECTOR_TYPE_LDP
+ || model.proj_type == PROJECTOR_TYPE_LDPV2;
+
+ {
+ bool use_gelu = false;
+ bool use_silu = false;
+ get_bool(KEY_USE_GELU, use_gelu, false);
+ get_bool(KEY_USE_SILU, use_silu, false);
+ if (use_gelu && use_silu) {
+ throw std::runtime_error(string_format("%s: both use_gelu and use_silu are set to true\n", __func__));
+ }
+ if (use_gelu) {
+ hparams.ffn_op = FFN_GELU;
+ log_ffn_op = "gelu";
+ } else if (use_silu) {
+ hparams.ffn_op = FFN_SILU;
+ log_ffn_op = "silu";
+ } else {
+ hparams.ffn_op = FFN_GELU_QUICK;
+ log_ffn_op = "gelu_quick";
+ }
+ }
+
+ {
+ std::string mm_patch_merge_type;
+ get_string(KEY_MM_PATCH_MERGE_TYPE, mm_patch_merge_type, false);
+ if (mm_patch_merge_type == "spatial_unpad") {
+ hparams.mm_patch_merge_type = PATCH_MERGE_SPATIAL_UNPAD;
+ }
+ }
+
+ if (is_vision) {
+ int idx_mean = lm_gguf_find_key(ctx_gguf.get(), KEY_IMAGE_MEAN);
+ int idx_std = lm_gguf_find_key(ctx_gguf.get(), KEY_IMAGE_STD);
+ LM_GGML_ASSERT(idx_mean >= 0 && "image_mean not found");
+ LM_GGML_ASSERT(idx_std >= 0 && "image_std not found");
+ const float * mean_data = (const float *) lm_gguf_get_arr_data(ctx_gguf.get(), idx_mean);
+ const float * std_data = (const float *) lm_gguf_get_arr_data(ctx_gguf.get(), idx_std);
+ for (int i = 0; i < 3; ++i) {
+ hparams.image_mean[i] = mean_data[i];
+ hparams.image_std[i] = std_data[i];
+ }
+ }
+
+ // Load the vision feature layer indices if they are explicitly provided;
+ // if multiple vision feature layers are present, the values will be concatenated
+ // to form the final visual features.
+ // NOTE: gguf conversions should standardize the values of the vision feature layer to
+ // be non-negative, since we use -1 to mark values as unset here.
+ std::vector<int> vision_feature_layer;
+ get_arr_int(KEY_FEATURE_LAYER, vision_feature_layer, false);
+ // convert std::vector to std::unordered_set
+ for (auto & layer : vision_feature_layer) {
+ hparams.vision_feature_layer.insert(layer);
+ }
+
+ // model-specific params
+ switch (model.proj_type) {
+ case PROJECTOR_TYPE_MINICPMV:
+ {
+ if (hparams.minicpmv_version == 0) {
+ hparams.minicpmv_version = 2; // default to 2 if not set
+ }
+ } break;
+ case PROJECTOR_TYPE_IDEFICS3:
+ case PROJECTOR_TYPE_LFM2:
+ case PROJECTOR_TYPE_INTERNVL:
+ {
+ get_u32(KEY_PROJ_SCALE_FACTOR, hparams.proj_scale_factor, false);
+ } break;
+ case PROJECTOR_TYPE_PIXTRAL:
+ {
+ hparams.rope_theta = 10000.0f;
+ hparams.warmup_image_size = hparams.patch_size * 8;
+ // Mistral Small 2506 needs a 1024x1024 image size cap to prevent OOM
+ // ref: https://github.com/ggml-org/llama.cpp/issues/14310
+ hparams.image_size = 1024;
+ get_u32(KEY_SPATIAL_MERGE_SIZE, hparams.spatial_merge_size, false);
+ } break;
+ case PROJECTOR_TYPE_GEMMA3:
+ {
+ // default value (used by all model sizes in the gemma 3 family)
+ // the number of patches for each **side** is reduced by a factor of 4
+ hparams.proj_scale_factor = 4;
+ // the test model (tinygemma3) has a different value, so we optionally read it
+ get_u32(KEY_PROJ_SCALE_FACTOR, hparams.proj_scale_factor, false);
+ } break;
+ case PROJECTOR_TYPE_QWEN2VL:
+ {
+ // max image size = sqrt(max_pixels) = 3584
+ // ref: https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct/blob/main/preprocessor_config.json
+ // however, the model uses an unreasonable amount of memory past 1024, so we cap it at 1024; otherwise it is unusable
+ // ref: https://huggingface.co/Qwen/Qwen2-VL-2B-Instruct/discussions/10
+ hparams.image_size = 1024;
+ hparams.warmup_image_size = hparams.patch_size * 8;
+ } break;
+ case PROJECTOR_TYPE_QWEN25VL:
+ {
+ // max image size = sqrt(max_pixels)
+ // https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct/blob/main/preprocessor_config.json
+ // however, the model uses an unreasonable amount of memory past 1024, so we cap it at 1024; otherwise it is unusable
+ // ref: https://huggingface.co/Qwen/Qwen2-VL-2B-Instruct/discussions/10
+ hparams.image_size = 1024;
+ hparams.warmup_image_size = hparams.patch_size * 8;
+ get_u32(KEY_WIN_ATTN_PATTERN, hparams.n_wa_pattern);
+ } break;
+ case PROJECTOR_TYPE_LLAMA4:
+ {
+ hparams.rope_theta = 10000.0f;
+ get_u32(KEY_PROJ_SCALE_FACTOR, hparams.proj_scale_factor);
+ set_llava_uhd_res_candidates(model, 3);
+ } break;
+ case PROJECTOR_TYPE_ULTRAVOX:
+ case PROJECTOR_TYPE_QWEN2A:
+ case PROJECTOR_TYPE_VOXTRAL:
+ {
+ bool require_stack = model.proj_type == PROJECTOR_TYPE_ULTRAVOX ||
+ model.proj_type == PROJECTOR_TYPE_VOXTRAL;
+ get_u32(KEY_A_PROJ_STACK_FACTOR, hparams.proj_stack_factor, require_stack);
+ if (hparams.n_mel_bins != 128) {
+ throw std::runtime_error(string_format("%s: only 128 mel bins are supported for ultravox\n", __func__));
+ }
+ hparams.ffn_op = FFN_GELU_ERF;
+ log_ffn_op = "gelu_erf"; // temporary solution for logging
+ } break;
+ default:
+ break;
+ }
+
+ LOG_INF("%s: projector: %s\n", __func__, proj_type.c_str());
+ LOG_INF("%s: n_embd: %d\n", __func__, hparams.n_embd);
+ LOG_INF("%s: n_head: %d\n", __func__, hparams.n_head);
+ LOG_INF("%s: n_ff: %d\n", __func__, hparams.n_ff);
+ LOG_INF("%s: n_layer: %d\n", __func__, hparams.n_layer);
+ LOG_INF("%s: ffn_op: %s\n", __func__, log_ffn_op.c_str());
+ LOG_INF("%s: projection_dim: %d\n", __func__, hparams.projection_dim);
+ if (is_vision) {
+ LOG_INF("\n--- vision hparams ---\n");
+ LOG_INF("%s: image_size: %d\n", __func__, hparams.image_size);
+ LOG_INF("%s: patch_size: %d\n", __func__, hparams.patch_size);
+ LOG_INF("%s: has_llava_proj: %d\n", __func__, hparams.has_llava_projector);
+ LOG_INF("%s: minicpmv_version: %d\n", __func__, hparams.minicpmv_version);
+ LOG_INF("%s: proj_scale_factor: %d\n", __func__, hparams.proj_scale_factor);
+ LOG_INF("%s: n_wa_pattern: %d\n", __func__, hparams.n_wa_pattern);
+ } else if (is_audio) {
+ LOG_INF("\n--- audio hparams ---\n");
+ LOG_INF("%s: n_mel_bins: %d\n", __func__, hparams.n_mel_bins);
+ LOG_INF("%s: proj_stack_factor: %d\n", __func__, hparams.proj_stack_factor);
+ }
+ LOG_INF("\n");
+ LOG_INF("%s: model size: %.2f MiB\n", __func__, model_size / 1024.0 / 1024.0);
+ LOG_INF("%s: metadata size: %.2f MiB\n", __func__, lm_ggml_get_mem_size(ctx_meta.get()) / 1024.0 / 1024.0);
+ }
+ }
+
+ void load_tensors(clip_ctx & ctx_clip) {
+ auto & model = ctx_clip.model;
+ auto & hparams = model.hparams;
+ std::map<std::string, size_t> tensor_offset;
+ std::vector<lm_ggml_tensor *> tensors_to_load;
+
+ // TODO @ngxson : support both audio and video in the future
+ const char * prefix = model.modality == CLIP_MODALITY_AUDIO ? "a" : "v";
+
+ // get offsets
+ for (int64_t i = 0; i < lm_gguf_get_n_tensors(ctx_gguf.get()); ++i) {
+ const char * name = lm_gguf_get_tensor_name(ctx_gguf.get(), i);
+ tensor_offset[name] = lm_gguf_get_data_offset(ctx_gguf.get()) + lm_gguf_get_tensor_offset(ctx_gguf.get(), i);
+ }
+
+ // create data context
+ struct lm_ggml_init_params params = {
+ /*.mem_size =*/ static_cast<size_t>(lm_gguf_get_n_tensors(ctx_gguf.get()) + 1) * lm_ggml_tensor_overhead(),
+ /*.mem_buffer =*/ NULL,
+ /*.no_alloc =*/ true,
+ };
+ ctx_clip.ctx_data.reset(lm_ggml_init(params));
+ if (!ctx_clip.ctx_data) {
+ throw std::runtime_error(string_format("%s: failed to init ggml context\n", __func__));
+ }
+
+ // helper function
+ auto get_tensor = [&](const std::string & name, bool required = true) {
+ lm_ggml_tensor * cur = lm_ggml_get_tensor(ctx_meta.get(), name.c_str());
+ if (!cur && required) {
+ throw std::runtime_error(string_format("%s: unable to find tensor %s\n", __func__, name.c_str()));
+ }
+ if (cur) {
+ tensors_to_load.push_back(cur);
+ // add tensors to context
+ lm_ggml_tensor * data_tensor = lm_ggml_dup_tensor(ctx_clip.ctx_data.get(), cur);
+ lm_ggml_set_name(data_tensor, cur->name);
+ cur = data_tensor;
+ }
+ return cur;
+ };
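The required flag is what distinguishes mandatory from optional weights in the loads that follow: a missing required tensor throws, while an optional one comes back as nullptr so later graph code can branch on it:

    // usage pattern, copied from the layer loop below:
    // layer.q_w = get_tensor(string_format(TN_ATTN_Q, prefix, il, "weight"));      // required: throws if absent
    // layer.q_b = get_tensor(string_format(TN_ATTN_Q, prefix, il, "bias"), false); // optional: may be nullptr
    // ...which is why build_vit guards every bias with `if (layer.q_b) { ... }`.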
+
+ model.class_embedding = get_tensor(TN_CLASS_EMBD, false);
+
+ model.pre_ln_w = get_tensor(string_format(TN_LN_PRE, prefix, "weight"), false);
+ model.pre_ln_b = get_tensor(string_format(TN_LN_PRE, prefix, "bias"), false);
+
+ model.post_ln_w = get_tensor(string_format(TN_LN_POST, prefix, "weight"), false);
+ model.post_ln_b = get_tensor(string_format(TN_LN_POST, prefix, "bias"), false);
+
+ model.patch_bias = get_tensor(TN_PATCH_BIAS, false);
+ model.patch_embeddings_0 = get_tensor(TN_PATCH_EMBD, false);
+ model.patch_embeddings_1 = get_tensor(TN_PATCH_EMBD_1, false);
+
+ model.position_embeddings = get_tensor(string_format(TN_POS_EMBD, prefix), false);
+
+ // layers
+ model.layers.resize(hparams.n_layer);
+ for (int il = 0; il < hparams.n_layer; ++il) {
+ auto & layer = model.layers[il];
+ layer.k_w = get_tensor(string_format(TN_ATTN_K, prefix, il, "weight"));
+ layer.q_w = get_tensor(string_format(TN_ATTN_Q, prefix, il, "weight"));
+ layer.v_w = get_tensor(string_format(TN_ATTN_V, prefix, il, "weight"));
+ layer.o_w = get_tensor(string_format(TN_ATTN_OUTPUT, prefix, il, "weight"));
+ layer.k_norm = get_tensor(string_format(TN_ATTN_K_NORM, prefix, il, "weight"), false);
+ layer.q_norm = get_tensor(string_format(TN_ATTN_Q_NORM, prefix, il, "weight"), false);
+ layer.ln_1_w = get_tensor(string_format(TN_LN_1, prefix, il, "weight"), false);
+ layer.ln_2_w = get_tensor(string_format(TN_LN_2, prefix, il, "weight"), false);
+ layer.ls_1_w = get_tensor(string_format(TN_LS_1, prefix, il, "weight"), false); // no bias
+ layer.ls_2_w = get_tensor(string_format(TN_LS_2, prefix, il, "weight"), false); // no bias
+
+ layer.k_b = get_tensor(string_format(TN_ATTN_K, prefix, il, "bias"), false);
+ layer.q_b = get_tensor(string_format(TN_ATTN_Q, prefix, il, "bias"), false);
+ layer.v_b = get_tensor(string_format(TN_ATTN_V, prefix, il, "bias"), false);
+ layer.o_b = get_tensor(string_format(TN_ATTN_OUTPUT, prefix, il, "bias"), false);
+ layer.ln_1_b = get_tensor(string_format(TN_LN_1, prefix, il, "bias"), false);
+ layer.ln_2_b = get_tensor(string_format(TN_LN_2, prefix, il, "bias"), false);
+
+ // ffn
+ layer.ff_up_w = get_tensor(string_format(TN_FFN_UP, prefix, il, "weight"));
+ layer.ff_up_b = get_tensor(string_format(TN_FFN_UP, prefix, il, "bias"), false);
+ layer.ff_gate_w = get_tensor(string_format(TN_FFN_GATE, prefix, il, "weight"), false);
+ layer.ff_gate_b = get_tensor(string_format(TN_FFN_GATE, prefix, il, "bias"), false);
+ layer.ff_down_w = get_tensor(string_format(TN_FFN_DOWN, prefix, il, "weight"));
+ layer.ff_down_b = get_tensor(string_format(TN_FFN_DOWN, prefix, il, "bias"), false);
+
+ // some models were exported with legacy (incorrect) naming, which is quite messy; we fix it here
+ // note: a Qwen model converted from the old surgery script has n_ff = 0, so we cannot use n_ff to check!
+ if (layer.ff_up_w && layer.ff_down_w && layer.ff_down_w->ne[0] == hparams.n_embd) {
+ // swap up and down weights
+ lm_ggml_tensor * tmp = layer.ff_up_w;
+ layer.ff_up_w = layer.ff_down_w;
+ layer.ff_down_w = tmp;
+ // swap up and down biases
+ tmp = layer.ff_up_b;
+ layer.ff_up_b = layer.ff_down_b;
+ layer.ff_down_b = tmp;
+ }
+ }
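The swap heuristic above works because lm_ggml_mul_mat(W, x) requires W->ne[0] == x->ne[0]: a correct down projection maps n_ff back to n_embd, so its ne[0] must be n_ff; finding n_embd there means the export wrote the matrices in the wrong slots. A sketch of the check with hypothetical sizes:

    #include <cstdio>

    int main() {
        const int n_embd = 1024, n_ff = 4096; // hypothetical
        const int ff_down_ne0 = 1024; // legacy export: equals n_embd, so up/down were swapped
        printf("%s\n", ff_down_ne0 == n_embd ? "swap up/down" : "ok");
        (void) n_ff;
        return 0;
    }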
+
+ switch (model.proj_type) {
+ case PROJECTOR_TYPE_MLP:
+ case PROJECTOR_TYPE_MLP_NORM:
+ {
+ // LLaVA projection
+ model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"), false);
+ model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"), false);
+ // Yi-type llava
+ model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"), false);
+ model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"), false);
+ // missing in Yi-type llava
+ model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"), false);
+ model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"), false);
+ // Yi-type llava
+ model.mm_3_w = get_tensor(string_format(TN_LLAVA_PROJ, 3, "weight"), false);
+ model.mm_3_b = get_tensor(string_format(TN_LLAVA_PROJ, 3, "bias"), false);
+ model.mm_4_w = get_tensor(string_format(TN_LLAVA_PROJ, 4, "weight"), false);
+ model.mm_4_b = get_tensor(string_format(TN_LLAVA_PROJ, 4, "bias"), false);
+ if (model.mm_3_w) {
+ // TODO: this is a hack to support Yi-type llava
+ model.proj_type = PROJECTOR_TYPE_MLP_NORM;
+ }
+ model.image_newline = get_tensor(TN_IMAGE_NEWLINE, false);
+ } break;
+ case PROJECTOR_TYPE_LDP:
+ {
+ // MobileVLM projection
+ model.mm_model_mlp_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
+ model.mm_model_mlp_1_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "bias"));
+ model.mm_model_mlp_3_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "weight"));
+ model.mm_model_mlp_3_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "bias"));
+ model.mm_model_block_1_block_0_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "0.weight"));
+ model.mm_model_block_1_block_0_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.weight"));
+ model.mm_model_block_1_block_0_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.bias"));
+ model.mm_model_block_1_block_1_fc1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.weight"));
+ model.mm_model_block_1_block_1_fc1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.bias"));
+ model.mm_model_block_1_block_1_fc2_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.weight"));
+ model.mm_model_block_1_block_1_fc2_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.bias"));
+ model.mm_model_block_1_block_2_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "0.weight"));
+ model.mm_model_block_1_block_2_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.weight"));
+ model.mm_model_block_1_block_2_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.bias"));
+ model.mm_model_block_2_block_0_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "0.weight"));
+ model.mm_model_block_2_block_0_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.weight"));
+ model.mm_model_block_2_block_0_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.bias"));
+ model.mm_model_block_2_block_1_fc1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.weight"));
+ model.mm_model_block_2_block_1_fc1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.bias"));
+ model.mm_model_block_2_block_1_fc2_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.weight"));
+ model.mm_model_block_2_block_1_fc2_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.bias"));
+ model.mm_model_block_2_block_2_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "0.weight"));
+ model.mm_model_block_2_block_2_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.weight"));
+ model.mm_model_block_2_block_2_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.bias"));
+ } break;
+ case PROJECTOR_TYPE_LDPV2:
+ {
+ // MobileVLM_V2 projection
+ model.mm_model_mlp_0_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "weight"));
+ model.mm_model_mlp_0_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "bias"));
+ model.mm_model_mlp_2_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "weight"));
+ model.mm_model_mlp_2_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "bias"));
+ model.mm_model_peg_0_w = get_tensor(string_format(TN_MVLM_PROJ_PEG, 0, "weight"));
+ model.mm_model_peg_0_b = get_tensor(string_format(TN_MVLM_PROJ_PEG, 0, "bias"));
+ } break;
+ case PROJECTOR_TYPE_MINICPMV:
+ {
+ // model.mm_model_pos_embed = get_tensor(new_clip->ctx_data, TN_MINICPMV_POS_EMBD);
+ model.mm_model_pos_embed_k = get_tensor(TN_MINICPMV_POS_EMBD_K);
+ model.mm_model_query = get_tensor(TN_MINICPMV_QUERY);
+ model.mm_model_proj = get_tensor(TN_MINICPMV_PROJ);
+ model.mm_model_kv_proj = get_tensor(TN_MINICPMV_KV_PROJ);
+ model.mm_model_attn_q_w = get_tensor(string_format(TN_MINICPMV_ATTN, "q", "weight"));
+ model.mm_model_attn_k_w = get_tensor(string_format(TN_MINICPMV_ATTN, "k", "weight"));
+ model.mm_model_attn_v_w = get_tensor(string_format(TN_MINICPMV_ATTN, "v", "weight"));
+ model.mm_model_attn_q_b = get_tensor(string_format(TN_MINICPMV_ATTN, "q", "bias"));
+ model.mm_model_attn_k_b = get_tensor(string_format(TN_MINICPMV_ATTN, "k", "bias"));
+ model.mm_model_attn_v_b = get_tensor(string_format(TN_MINICPMV_ATTN, "v", "bias"));
+ model.mm_model_attn_o_w = get_tensor(string_format(TN_MINICPMV_ATTN, "out", "weight"));
+ model.mm_model_attn_o_b = get_tensor(string_format(TN_MINICPMV_ATTN, "out", "bias"));
+ model.mm_model_ln_q_w = get_tensor(string_format(TN_MINICPMV_LN, "q", "weight"));
+ model.mm_model_ln_q_b = get_tensor(string_format(TN_MINICPMV_LN, "q", "bias"));
+ model.mm_model_ln_kv_w = get_tensor(string_format(TN_MINICPMV_LN, "kv", "weight"));
+ model.mm_model_ln_kv_b = get_tensor(string_format(TN_MINICPMV_LN, "kv", "bias"));
+ model.mm_model_ln_post_w = get_tensor(string_format(TN_MINICPMV_LN, "post", "weight"));
+ model.mm_model_ln_post_b = get_tensor(string_format(TN_MINICPMV_LN, "post", "bias"));
+ } break;
+ case PROJECTOR_TYPE_GLM_EDGE:
+ {
+ model.mm_model_adapter_conv_w = get_tensor(string_format(TN_GLM_ADAPER_CONV, "weight"));
+ model.mm_model_adapter_conv_b = get_tensor(string_format(TN_GLM_ADAPER_CONV, "bias"));
+ model.mm_model_mlp_0_w = get_tensor(string_format(TN_GLM_ADAPTER_LINEAR, "weight"));
+ model.mm_model_ln_q_w = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1, "weight"));
+ model.mm_model_ln_q_b = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1, "bias"));
+ model.mm_model_mlp_1_w = get_tensor(string_format(TN_GLM_ADAPTER_D_H_2_4H, "weight"));
+ model.mm_model_mlp_2_w = get_tensor(string_format(TN_GLM_ADAPTER_GATE, "weight"));
+ model.mm_model_mlp_3_w = get_tensor(string_format(TN_GLM_ADAPTER_D_4H_2_H, "weight"));
+ model.mm_glm_tok_boi = get_tensor(string_format(TN_TOK_GLM_BOI, "weight"));
+ model.mm_glm_tok_eoi = get_tensor(string_format(TN_TOK_GLM_EOI, "weight"));
+ } break;
+ case PROJECTOR_TYPE_QWEN2VL:
+ case PROJECTOR_TYPE_QWEN25VL:
+ {
+ model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"));
+ model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"));
+ model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
+ model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"));
+ } break;
+ case PROJECTOR_TYPE_GEMMA3:
+ {
+ model.mm_input_proj_w = get_tensor(TN_MM_INP_PROJ);
+ model.mm_soft_emb_norm_w = get_tensor(TN_MM_SOFT_EMB_N);
+ } break;
+ case PROJECTOR_TYPE_IDEFICS3:
+ {
+ model.projection = get_tensor(TN_MM_PROJECTOR);
+ } break;
+ case PROJECTOR_TYPE_LFM2:
+ {
+ model.mm_input_norm_w = get_tensor(TN_MM_INP_NORM);
+ model.mm_input_norm_b = get_tensor(TN_MM_INP_NORM_B);
+ model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"));
+ model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"));
+ model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
+ model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"));
+ } break;
+ case PROJECTOR_TYPE_PIXTRAL:
+ {
+ model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"));
+ model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"), false);
+ model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
+ model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"), false);
+ // [IMG_BREAK] token embedding
+ model.token_embd_img_break = get_tensor(TN_TOK_IMG_BREAK);
+ // for mistral small 3.1
+ model.mm_input_norm_w = get_tensor(TN_MM_INP_NORM, false);
+ model.mm_patch_merger_w = get_tensor(TN_MM_PATCH_MERGER, false);
+ } break;
+ case PROJECTOR_TYPE_ULTRAVOX:
+ {
+ model.conv1d_1_w = get_tensor(string_format(TN_CONV1D, 1, "weight"));
+ model.conv1d_1_b = get_tensor(string_format(TN_CONV1D, 1, "bias"));
+ model.conv1d_2_w = get_tensor(string_format(TN_CONV1D, 2, "weight"));
+ model.conv1d_2_b = get_tensor(string_format(TN_CONV1D, 2, "bias"));
+ model.mm_1_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 1, "weight"));
+ model.mm_2_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 2, "weight"));
+ model.mm_norm_pre_w = get_tensor(string_format(TN_MM_NORM_PRE, "weight"));
+ model.mm_norm_mid_w = get_tensor(string_format(TN_MM_NORM_MID, "weight"));
+ } break;
+ case PROJECTOR_TYPE_QWEN2A:
+ {
+ model.conv1d_1_w = get_tensor(string_format(TN_CONV1D, 1, "weight"));
+ model.conv1d_1_b = get_tensor(string_format(TN_CONV1D, 1, "bias"));
+ model.conv1d_2_w = get_tensor(string_format(TN_CONV1D, 2, "weight"));
+ model.conv1d_2_b = get_tensor(string_format(TN_CONV1D, 2, "bias"));
+ model.mm_fc_w = get_tensor(string_format(TN_MM_AUDIO_FC, "weight"));
+ model.mm_fc_b = get_tensor(string_format(TN_MM_AUDIO_FC, "bias"));
+ } break;
+ case PROJECTOR_TYPE_VOXTRAL:
+ {
+ model.conv1d_1_w = get_tensor(string_format(TN_CONV1D, 1, "weight"));
+ model.conv1d_1_b = get_tensor(string_format(TN_CONV1D, 1, "bias"));
+ model.conv1d_2_w = get_tensor(string_format(TN_CONV1D, 2, "weight"));
+ model.conv1d_2_b = get_tensor(string_format(TN_CONV1D, 2, "bias"));
+ model.mm_1_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 1, "weight"));
+ model.mm_2_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 2, "weight"));
+ } break;
+ case PROJECTOR_TYPE_INTERNVL:
+ {
+ model.mm_0_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "weight"));
+ model.mm_0_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "bias"));
+ model.mm_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
+ model.mm_1_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "bias"));
+ model.mm_3_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "weight"));
+ model.mm_3_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "bias"));
+ } break;
+ case PROJECTOR_TYPE_LLAMA4:
+ {
+ model.mm_model_proj = get_tensor(TN_MM_PROJECTOR);
+ model.mm_model_mlp_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
+ model.mm_model_mlp_2_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "weight"));
+ } break;
+ default:
+ LM_GGML_ASSERT(false && "unknown projector type");
+ }
+
+ // load data
+ {
+ std::vector<uint8_t> read_buf;
+
+ auto fin = std::ifstream(fname, std::ios::binary);
+ if (!fin) {
+ throw std::runtime_error(string_format("%s: failed to open %s\n", __func__, fname.c_str()));
+ }
+
+ // alloc memory and offload data
+ lm_ggml_backend_buffer_type_t buft = lm_ggml_backend_get_default_buffer_type(ctx_clip.backend);
+ ctx_clip.buf.reset(lm_ggml_backend_alloc_ctx_tensors_from_buft(ctx_clip.ctx_data.get(), buft));
+ lm_ggml_backend_buffer_set_usage(ctx_clip.buf.get(), LM_GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
+ for (auto & t : tensors_to_load) {
+ lm_ggml_tensor * cur = lm_ggml_get_tensor(ctx_clip.ctx_data.get(), t->name);
+ const size_t offset = tensor_offset[t->name];
+ fin.seekg(offset, std::ios::beg);
+ if (!fin) {
+ throw std::runtime_error(string_format("%s: failed to seek for tensor %s\n", __func__, t->name));
+ }
+ size_t num_bytes = lm_ggml_nbytes(cur);
+ if (lm_ggml_backend_buft_is_host(buft)) {
+ // for the CPU and Metal backend, we can read directly into the tensor
+ fin.read(reinterpret_cast<char *>(cur->data), num_bytes);
+ } else {
+ // read into a temporary buffer first, then copy to device memory
+ read_buf.resize(num_bytes);
+ fin.read(reinterpret_cast<char *>(read_buf.data()), num_bytes);
+ lm_ggml_backend_tensor_set(cur, read_buf.data(), 0, num_bytes);
+ }
+ }
+ fin.close();
+
+ LOG_DBG("%s: loaded %zu tensors from %s\n", __func__, tensors_to_load.size(), fname.c_str());
+ }
+ }
2709
+
2710
+ void alloc_compute_meta(clip_ctx & ctx_clip) {
2711
+ const auto & hparams = ctx_clip.model.hparams;
2712
+ ctx_clip.buf_compute_meta.resize(ctx_clip.max_nodes * lm_ggml_tensor_overhead() + lm_ggml_graph_overhead());
2713
+
2714
+ // create a fake batch
2715
+ clip_image_f32_batch batch;
2716
+ clip_image_f32_ptr img(clip_image_f32_init());
2717
+ if (ctx_clip.model.modality == CLIP_MODALITY_VISION) {
2718
+ img->nx = hparams.warmup_image_size;
2719
+ img->ny = hparams.warmup_image_size;
2720
+ } else {
2721
+ img->nx = hparams.warmup_audio_size;
2722
+ img->ny = hparams.n_mel_bins;
2723
+ }
2724
+ batch.entries.push_back(std::move(img));
2725
+
2726
+ lm_ggml_cgraph * gf = clip_image_build_graph(&ctx_clip, batch);
2727
+ lm_ggml_backend_sched_reserve(ctx_clip.sched.get(), gf);
2728
+
2729
+ for (size_t i = 0; i < ctx_clip.backend_ptrs.size(); ++i) {
2730
+ lm_ggml_backend_t backend = ctx_clip.backend_ptrs[i];
2731
+ lm_ggml_backend_buffer_type_t buft = ctx_clip.backend_buft[i];
2732
+ size_t size = lm_ggml_backend_sched_get_buffer_size(ctx_clip.sched.get(), backend);
2733
+ if (size > 1) {
2734
+ LOG_INF("%s: %10s compute buffer size = %8.2f MiB\n", __func__,
2735
+ lm_ggml_backend_buft_name(buft),
2736
+ size / 1024.0 / 1024.0);
2737
+ }
2738
+ }
2739
+ }
2740
+
2741
+ void get_bool(const std::string & key, bool & output, bool required = true) {
2742
+ const int i = lm_gguf_find_key(ctx_gguf.get(), key.c_str());
2743
+ if (i < 0) {
2744
+ if (required) throw std::runtime_error("Key not found: " + key);
2745
+ return;
2746
+ }
2747
+ output = lm_gguf_get_val_bool(ctx_gguf.get(), i);
2748
+ }
2749
+
2750
+ void get_i32(const std::string & key, int & output, bool required = true) {
2751
+ const int i = lm_gguf_find_key(ctx_gguf.get(), key.c_str());
2752
+ if (i < 0) {
2753
+ if (required) throw std::runtime_error("Key not found: " + key);
2754
+ return;
2755
+ }
2756
+ output = lm_gguf_get_val_i32(ctx_gguf.get(), i);
2757
+ }
2758
+
2759
+ void get_u32(const std::string & key, int & output, bool required = true) {
2760
+ const int i = lm_gguf_find_key(ctx_gguf.get(), key.c_str());
2761
+ if (i < 0) {
2762
+ if (required) throw std::runtime_error("Key not found: " + key);
2763
+ return;
2764
+ }
2765
+ output = lm_gguf_get_val_u32(ctx_gguf.get(), i);
2766
+ }
2767
+
2768
+ void get_f32(const std::string & key, float & output, bool required = true) {
2769
+ const int i = lm_gguf_find_key(ctx_gguf.get(), key.c_str());
2770
+ if (i < 0) {
2771
+ if (required) throw std::runtime_error("Key not found: " + key);
2772
+ return;
2773
+ }
2774
+ output = lm_gguf_get_val_f32(ctx_gguf.get(), i);
2775
+ }
2776
+
2777
+ void get_string(const std::string & key, std::string & output, bool required = true) {
2778
+ const int i = lm_gguf_find_key(ctx_gguf.get(), key.c_str());
2779
+ if (i < 0) {
2780
+ if (required) throw std::runtime_error("Key not found: " + key);
2781
+ return;
2782
+ }
2783
+ output = std::string(lm_gguf_get_val_str(ctx_gguf.get(), i));
2784
+ }
2785
+
2786
+ void get_arr_int(const std::string & key, std::vector<int> & output, bool required = true) {
2787
+ const int i = lm_gguf_find_key(ctx_gguf.get(), key.c_str());
2788
+ if (i < 0) {
2789
+ if (required) throw std::runtime_error("Key not found: " + key);
2790
+ return;
2791
+ }
2792
+ int n = lm_gguf_get_arr_n(ctx_gguf.get(), i);
2793
+ output.resize(n);
2794
+ const int32_t * values = (const int32_t *)lm_gguf_get_arr_data(ctx_gguf.get(), i);
2795
+ for (int i = 0; i < n; ++i) {
2796
+ output[i] = values[i];
2797
+ }
2798
+ }
2799
+
2800
+ void set_llava_uhd_res_candidates(clip_model & model, const int max_patches_per_side) {
2801
+ auto & hparams = model.hparams;
2802
+ for (int x = 1; x <= max_patches_per_side; x++) {
2803
+ for (int y = 1; y <= max_patches_per_side; y++) {
2804
+ if (x == 1 && y == 1) {
2805
+ continue; // skip the first point
2806
+ }
2807
+ hparams.image_res_candidates.push_back(clip_image_size{
2808
+ x*hparams.image_size,
2809
+ y*hparams.image_size,
2810
+ });
2811
+ }
2812
+ }
2813
+ }
2814
+ };
2815
+
2816
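A quick worked example of set_llava_uhd_res_candidates above (numbers illustrative): with image_size = 336 and max_patches_per_side = 2, the loop emits every (x·336, y·336) for x, y in 1..2 except the skipped 1×1 point, i.e. the candidates 336×672, 672×336 and 672×672.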
+ struct clip_init_result clip_init(const char * fname, struct clip_context_params ctx_params) {
+ g_logger_state.verbosity_thold = ctx_params.verbosity;
+ clip_ctx * ctx_vision = nullptr;
+ clip_ctx * ctx_audio = nullptr;
+
+ try {
+ clip_model_loader loader(fname);
+
+ if (loader.has_vision) {
+ ctx_vision = new clip_ctx(ctx_params);
+ loader.load_hparams(ctx_vision->model, CLIP_MODALITY_VISION);
+ loader.load_tensors(*ctx_vision);
+ loader.alloc_compute_meta(*ctx_vision);
+ }
+
+ if (loader.has_audio) {
+ ctx_audio = new clip_ctx(ctx_params);
+ loader.load_hparams(ctx_audio->model, CLIP_MODALITY_AUDIO);
+ loader.load_tensors(*ctx_audio);
+ loader.alloc_compute_meta(*ctx_audio);
+ }
+
+ } catch (const std::exception & e) {
+ LOG_ERR("%s: failed to load model '%s': %s\n", __func__, fname, e.what());
+ if (ctx_vision) {
+ delete ctx_vision;
+ }
+ if (ctx_audio) {
+ delete ctx_audio;
+ }
+ return {nullptr, nullptr};
+ }
+
+ return {ctx_vision, ctx_audio};
+ }
+
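For orientation, a minimal usage sketch of the entry point above. The ctx_v/ctx_a result members and the clip_context_params fields are assumed to match the accompanying clip.h; the model path is a placeholder:

    // Illustrative only; not part of the shipped sources.
    clip_context_params params {};
    params.use_gpu   = true;                   // assumed field, as in upstream clip.h
    params.verbosity = LM_GGML_LOG_LEVEL_INFO; // feeds g_logger_state.verbosity_thold
    clip_init_result res = clip_init("mmproj.gguf", params); // placeholder path
    if (res.ctx_v) { /* vision encoder ready */ }
    if (res.ctx_a) { /* audio encoder ready */ }
    clip_free(res.ctx_v); // clip_free(nullptr) is a safe no-op (defined below)
    clip_free(res.ctx_a);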
+ struct clip_image_size * clip_image_size_init() {
+ struct clip_image_size * load_image_size = new struct clip_image_size();
+ load_image_size->width = 448;
+ load_image_size->height = 448;
+ return load_image_size;
+ }
+
+ struct clip_image_u8 * clip_image_u8_init() {
+ return new clip_image_u8();
+ }
+
+ struct clip_image_f32 * clip_image_f32_init() {
+ return new clip_image_f32();
+ }
+
+ struct clip_image_f32_batch * clip_image_f32_batch_init() {
+ return new clip_image_f32_batch();
+ }
+
+ unsigned char * clip_image_u8_get_data(struct clip_image_u8 * img, uint32_t * nx, uint32_t * ny) {
+ if (nx) *nx = img->nx;
+ if (ny) *ny = img->ny;
+ return img->buf.data();
+ }
+
+ void clip_image_size_free(struct clip_image_size * load_image_size) {
+ if (load_image_size == nullptr) {
+ return;
+ }
+ delete load_image_size;
+ }
+ void clip_image_u8_free(struct clip_image_u8 * img) { if (img) delete img; }
+ void clip_image_f32_free(struct clip_image_f32 * img) { if (img) delete img; }
+ void clip_image_u8_batch_free(struct clip_image_u8_batch * batch) { if (batch) delete batch; }
+ void clip_image_f32_batch_free(struct clip_image_f32_batch * batch) { if (batch) delete batch; }
+
+ size_t clip_image_f32_batch_n_images(const struct clip_image_f32_batch * batch) {
+ return batch->entries.size();
+ }
+
+ size_t clip_image_f32_batch_nx(const struct clip_image_f32_batch * batch, int idx) {
+ if (idx < 0 || idx >= (int)batch->entries.size()) {
+ LOG_ERR("%s: invalid index %d\n", __func__, idx);
+ return 0;
+ }
+ return batch->entries[idx]->nx;
+ }
+
+ size_t clip_image_f32_batch_ny(const struct clip_image_f32_batch * batch, int idx) {
+ if (idx < 0 || idx >= (int)batch->entries.size()) {
+ LOG_ERR("%s: invalid index %d\n", __func__, idx);
+ return 0;
+ }
+ return batch->entries[idx]->ny;
+ }
+
+ clip_image_f32 * clip_image_f32_get_img(const struct clip_image_f32_batch * batch, int idx) {
+ if (idx < 0 || idx >= (int)batch->entries.size()) {
+ LOG_ERR("%s: invalid index %d\n", __func__, idx);
+ return nullptr;
+ }
+ return batch->entries[idx].get();
+ }
+
+ void clip_build_img_from_pixels(const unsigned char * rgb_pixels, int nx, int ny, clip_image_u8 * img) {
+ img->nx = nx;
+ img->ny = ny;
+ img->buf.resize(3 * nx * ny);
+ memcpy(img->buf.data(), rgb_pixels, img->buf.size());
+ }
+
+ // Normalize image to float32 - careful with pytorch .to(model.device, dtype=torch.float16) - this sometimes reduces precision (32>16>32), sometimes not
+ static void normalize_image_u8_to_f32(const clip_image_u8 & src, clip_image_f32 & dst, const float mean[3], const float std[3]) {
+ dst.nx = src.nx;
+ dst.ny = src.ny;
+ dst.buf.resize(src.buf.size());
+
+ // TODO @ngxson : seems like this could be done more efficiently on cgraph
+ for (size_t i = 0; i < src.buf.size(); ++i) {
+ int c = i % 3; // rgb
+ dst.buf[i] = (static_cast<float>(src.buf[i]) / 255.0f - mean[c]) / std[c];
+ }
+ }
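In other words, each byte v in channel c is mapped to (v / 255 − mean[c]) / std[c], the standard torchvision-style per-channel normalization; mean and std come from the model hparams (image_mean / image_std), as used by clip_image_preprocess below.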
+
2936
+ // set of tools to manupulate images
2937
+ // in the future, we can have HW acceleration by allowing this struct to access 3rd party lib like imagick or opencv
2938
+ struct image_manipulation {
2939
+ // Bilinear resize function
2940
+ static void bilinear_resize(const clip_image_u8& src, clip_image_u8& dst, int target_width, int target_height) {
2941
+ dst.nx = target_width;
2942
+ dst.ny = target_height;
2943
+ dst.buf.resize(3 * target_width * target_height);
2944
+
2945
+ float x_ratio = static_cast<float>(src.nx - 1) / target_width;
2946
+ float y_ratio = static_cast<float>(src.ny - 1) / target_height;
2947
+
2948
+ for (int y = 0; y < target_height; y++) {
2949
+ for (int x = 0; x < target_width; x++) {
2950
+ float px = x_ratio * x;
2951
+ float py = y_ratio * y;
2952
+ int x_floor = static_cast<int>(px);
2953
+ int y_floor = static_cast<int>(py);
2954
+ float x_lerp = px - x_floor;
2955
+ float y_lerp = py - y_floor;
2956
+
2957
+ for (int c = 0; c < 3; c++) {
2958
+ float top = lerp(
2959
+ static_cast<float>(src.buf[3 * (y_floor * src.nx + x_floor) + c]),
2960
+ static_cast<float>(src.buf[3 * (y_floor * src.nx + (x_floor + 1)) + c]),
2961
+ x_lerp
2962
+ );
2963
+ float bottom = lerp(
2964
+ static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + x_floor) + c]),
2965
+ static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + (x_floor + 1)) + c]),
2966
+ x_lerp
2967
+ );
2968
+ dst.buf[3 * (y * target_width + x) + c] = static_cast<uint8_t>(lerp(top, bottom, y_lerp));
2969
+ }
2970
+ }
2971
+ }
2972
+ }
2973
+
2974
+ // Bicubic resize function
2975
+ // part of image will be cropped if the aspect ratio is different
2976
+ static bool bicubic_resize(const clip_image_u8 & img, clip_image_u8 & dst, int target_width, int target_height) {
2977
+ const int nx = img.nx;
2978
+ const int ny = img.ny;
2979
+
2980
+ dst.nx = target_width;
2981
+ dst.ny = target_height;
2982
+ dst.buf.resize(3 * target_width * target_height);
2983
+
2984
+ float Cc;
2985
+ float C[5];
2986
+ float d0, d2, d3, a0, a1, a2, a3;
2987
+ int i, j, k, jj;
2988
+ int x, y;
2989
+ float dx, dy;
2990
+ float tx, ty;
2991
+
2992
+ tx = (float)nx / (float)target_width;
2993
+ ty = (float)ny / (float)target_height;
2994
+
2995
+ // Bicubic interpolation; adapted from ViT.cpp, inspired from :
2996
+ // -> https://github.com/yglukhov/bicubic-interpolation-image-processing/blob/master/libimage.c#L36
2997
+ // -> https://en.wikipedia.org/wiki/Bicubic_interpolation
2998
+
2999
+ for (i = 0; i < target_height; i++) {
3000
+ for (j = 0; j < target_width; j++) {
3001
+ x = (int)(tx * j);
3002
+ y = (int)(ty * i);
3003
+
3004
+ dx = tx * j - x;
3005
+ dy = ty * i - y;
3006
+
3007
+ for (k = 0; k < 3; k++) {
3008
+ for (jj = 0; jj <= 3; jj++) {
3009
+ d0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x - 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
3010
+ d2 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
3011
+ d3 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 2, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
3012
+ a0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
3013
+
3014
+ a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3;
3015
+ a2 = 1.0 / 2 * d0 + 1.0 / 2 * d2;
3016
+ a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3;
3017
+
3018
+ C[jj] = a0 + a1 * dx + a2 * dx * dx + a3 * dx * dx * dx;
3019
+
3020
+ d0 = C[0] - C[1];
3021
+ d2 = C[2] - C[1];
3022
+ d3 = C[3] - C[1];
3023
+ a0 = C[1];
3024
+ a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3;
3025
+ a2 = 1.0 / 2 * d0 + 1.0 / 2 * d2;
3026
+ a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3;
3027
+ Cc = a0 + a1 * dy + a2 * dy * dy + a3 * dy * dy * dy;
3028
+
3029
+ const uint8_t Cc2 = std::min(std::max(std::round(Cc), 0.0f), 255.0f);
3030
+ dst.buf[(i * target_width + j) * 3 + k] = float(Cc2);
3031
+ }
3032
+ }
3033
+ }
3034
+ }
3035
+
3036
+ return true;
3037
+ }
3038
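Written out, the coefficient algebra in bicubic_resize is the unique cubic through four neighbouring samples. With samples p(-1), p(0), p(1), p(2) and the differences d0 = p(-1) − p(0), d2 = p(1) − p(0), d3 = p(2) − p(0), each inner loop evaluates, at fractional offset t in [0, 1):

    p(t) = p(0) + (−d0/3 + d2 − d3/6)·t + (d0/2 + d2/2)·t² + (−d0/6 − d2/2 + d3/6)·t³

which reproduces p(−1), p(0), p(1), p(2) at t = −1, 0, 1, 2. The pass runs along x for the four neighbouring rows (filling C[0..3]) and then along y; note the code re-evaluates the y step on every jj iteration and keeps only the last write.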
+
3039
+ // llava-1.6 type of resize_and_pad
3040
+ // if the ratio is not 1:1, padding with pad_color will be applied
3041
+ // pad_color is single channel, default is 0 (black)
3042
+ static void resize_and_pad_image(const clip_image_u8 & image, clip_image_u8 & dst, const clip_image_size & target_resolution, std::array<uint8_t, 3> pad_color = {0, 0, 0}) {
3043
+ int target_width = target_resolution.width;
3044
+ int target_height = target_resolution.height;
3045
+
3046
+ float scale_w = static_cast<float>(target_width) / image.nx;
3047
+ float scale_h = static_cast<float>(target_height) / image.ny;
3048
+
3049
+ int new_width, new_height;
3050
+
3051
+ if (scale_w < scale_h) {
3052
+ new_width = target_width;
3053
+ new_height = std::min(static_cast<int>(std::ceil(image.ny * scale_w)), target_height);
3054
+ } else {
3055
+ new_height = target_height;
3056
+ new_width = std::min(static_cast<int>(std::ceil(image.nx * scale_h)), target_width);
3057
+ }
3058
+
3059
+ clip_image_u8 resized_image;
3060
+ bicubic_resize(image, resized_image, new_width, new_height);
3061
+
3062
+ clip_image_u8 padded_image;
3063
+ padded_image.nx = target_width;
3064
+ padded_image.ny = target_height;
3065
+ padded_image.buf.resize(3 * target_width * target_height);
3066
+
3067
+ // Fill the padded image with the fill color
3068
+ for (size_t i = 0; i < padded_image.buf.size(); i += 3) {
3069
+ padded_image.buf[i] = pad_color[0];
3070
+ padded_image.buf[i + 1] = pad_color[1];
3071
+ padded_image.buf[i + 2] = pad_color[2];
3072
+ }
3073
+
3074
+ // Calculate padding offsets
3075
+ int pad_x = (target_width - new_width) / 2;
3076
+ int pad_y = (target_height - new_height) / 2;
3077
+
3078
+ // Copy the resized image into the center of the padded buffer
3079
+ for (int y = 0; y < new_height; ++y) {
3080
+ for (int x = 0; x < new_width; ++x) {
3081
+ for (int c = 0; c < 3; ++c) {
3082
+ padded_image.buf[3 * ((y + pad_y) * target_width + (x + pad_x)) + c] = resized_image.buf[3 * (y * new_width + x) + c];
3083
+ }
3084
+ }
3085
+ }
3086
+ dst = std::move(padded_image);
3087
+ }
3088
+
3089
+ static void crop_image(const clip_image_u8 & image, clip_image_u8 & dst, int x, int y, int w, int h) {
3090
+ dst.nx = w;
3091
+ dst.ny = h;
3092
+ dst.buf.resize(3 * w * h);
3093
+
3094
+ for (int i = 0; i < h; ++i) {
3095
+ for (int j = 0; j < w; ++j) {
3096
+ int src_idx = 3 * ((y + i)*image.nx + (x + j));
3097
+ int dst_idx = 3 * (i*w + j);
3098
+ dst.buf[dst_idx] = image.buf[src_idx];
3099
+ dst.buf[dst_idx + 1] = image.buf[src_idx + 1];
3100
+ dst.buf[dst_idx + 2] = image.buf[src_idx + 2];
3101
+ }
3102
+ }
3103
+ }
3104
+
3105
+ // calculate the size of the **resized** image, while preserving the aspect ratio
3106
+ // the calculated size will be aligned to the nearest multiple of align_size
3107
+ // if H or W size is larger than max_dimension, it will be resized to max_dimension
3108
+ static clip_image_size calc_size_preserved_ratio(const clip_image_size & inp_size, const int align_size, const int max_dimension) {
3109
+ if (inp_size.width <= 0 || inp_size.height <= 0 || align_size <= 0 || max_dimension <= 0) {
3110
+ return {0, 0};
3111
+ }
3112
+
3113
+ float scale = std::min(1.0f, std::min(static_cast<float>(max_dimension) / inp_size.width,
3114
+ static_cast<float>(max_dimension) / inp_size.height));
3115
+
3116
+ float target_width_f = static_cast<float>(inp_size.width) * scale;
3117
+ float target_height_f = static_cast<float>(inp_size.height) * scale;
3118
+
3119
+ int aligned_width = CLIP_ALIGN((int)target_width_f, align_size);
3120
+ int aligned_height = CLIP_ALIGN((int)target_height_f, align_size);
3121
+
3122
+ return {aligned_width, aligned_height};
3123
+ }
3124
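For example (assuming CLIP_ALIGN rounds up to the next multiple of align_size): a 1000×640 input with align_size = 32 and max_dimension = 512 gives scale = min(1, 512/1000, 512/640) = 0.512, a raw target of 512.0×327.7, and an aligned result of 512×352.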
+
3125
+ private:
3126
+ static inline int clip(int x, int lower, int upper) {
3127
+ return std::max(lower, std::min(x, upper));
3128
+ }
3129
+
3130
+ // Linear interpolation between two points
3131
+ static inline float lerp(float s, float e, float t) {
3132
+ return s + (e - s) * t;
3133
+ }
3134
+ };
3135
+
3136
+ /**
3137
+ * implementation of LLaVA-UHD:
3138
+ * - https://arxiv.org/pdf/2403.11703
3139
+ * - https://github.com/thunlp/LLaVA-UHD
3140
+ * - https://github.com/thunlp/LLaVA-UHD/blob/302301bc2175f7e717fb8548516188e89f649753/llava_uhd/train/llava-uhd/slice_logic.py#L118
3141
+ *
3142
+ * overview:
3143
+ * - an image always have a single overview (downscaled image)
3144
+ * - an image can have 0 or multiple slices, depending on the image size
3145
+ * - each slice can then be considered as a separate image
3146
+ *
3147
+ * for example:
3148
+ *
3149
+ * [overview] --> [slice 1] --> [slice 2]
3150
+ * | |
3151
+ * +--> [slice 3] --> [slice 4]
3152
+ */
3153
+ struct llava_uhd {
+ struct slice_coordinates {
+ int x;
+ int y;
+ clip_image_size size;
+ };
+
+ struct slice_instructions {
+ clip_image_size overview_size; // size of downscaled image
+ clip_image_size refined_size; // size of image right before slicing (must be multiple of slice size)
+ clip_image_size grid_size; // grid_size.width * grid_size.height = number of slices
+ std::vector<slice_coordinates> slices;
+ bool padding_refined = false; // if true, the refined image will be padded to the grid size (e.g. llava-1.6)
+ };
+
+ static slice_instructions get_slice_instructions(struct clip_ctx * ctx, const clip_image_size & original_size) {
+ slice_instructions res;
+ const int patch_size = clip_get_patch_size(ctx);
+ const int slice_size = clip_get_image_size(ctx);
+ const int original_width = original_size.width;
+ const int original_height = original_size.height;
+
+ const bool has_slices = original_size.width > slice_size || original_size.height > slice_size;
+ const bool has_pinpoints = !ctx->model.hparams.image_res_candidates.empty();
+
+ if (!has_slices) {
+ // skip slicing logic
+ res.overview_size = clip_image_size{slice_size, slice_size};
+ res.refined_size = clip_image_size{0, 0};
+ res.grid_size = clip_image_size{0, 0};
+
+ return res;
+ }
+
+ if (has_pinpoints) {
+ // has pinpoints, use them to calculate the grid size (e.g. llava-1.6)
+ auto refine_size = llava_uhd::select_best_resolution(
+ original_size,
+ ctx->model.hparams.image_res_candidates);
+ res.overview_size = clip_image_size{slice_size, slice_size};
+ res.refined_size = refine_size;
+ res.grid_size = clip_image_size{0, 0};
+ res.padding_refined = true;
+
+ LOG_DBG("%s: using pinpoints for slicing\n", __func__);
+ LOG_DBG("%s: original size: %d x %d, overview size: %d x %d, refined size: %d x %d\n",
+ __func__, original_width, original_height,
+ res.overview_size.width, res.overview_size.height,
+ res.refined_size.width, res.refined_size.height);
+
+ for (int y = 0; y < refine_size.height; y += slice_size) {
+ for (int x = 0; x < refine_size.width; x += slice_size) {
+ slice_coordinates slice;
+ slice.x = x;
+ slice.y = y;
+ slice.size.width = std::min(slice_size, refine_size.width - x);
+ slice.size.height = std::min(slice_size, refine_size.height - y);
+ res.slices.push_back(slice);
+ LOG_DBG("%s: slice %d: x=%d, y=%d, size=%dx%d\n",
+ __func__, (int)res.slices.size() - 1,
+ slice.x, slice.y, slice.size.width, slice.size.height);
+ }
+ }
+
+ res.grid_size.height = refine_size.height / slice_size;
+ res.grid_size.width = refine_size.width / slice_size;
+ LOG_DBG("%s: grid size: %d x %d\n", __func__, res.grid_size.width, res.grid_size.height);
+
+ return res;
+ }
+
+ // no pinpoints, dynamically calculate the grid size (e.g. minicpmv)
+
+ auto best_size = get_best_resize(original_size, slice_size, patch_size, !has_slices);
+ res.overview_size = best_size;
+
+ {
+ const int max_slice_nums = 9; // TODO: this is only used by minicpmv, maybe remove it
+ const float log_ratio = log((float)original_width / original_height);
+ const float ratio = (float)original_width * original_height / (slice_size * slice_size);
+ const int multiple = fmin(ceil(ratio), max_slice_nums);
+
+ auto best_grid = get_best_grid(max_slice_nums, multiple, log_ratio);
+ auto refine_size = get_refine_size(original_size, best_grid, slice_size, patch_size, true);
+ res.grid_size = best_grid;
+ res.refined_size = refine_size;
+
+ LOG_DBG("%s: original size: %d x %d, overview size: %d x %d, refined size: %d x %d, grid size: %d x %d\n",
+ __func__, original_width, original_height,
+ res.overview_size.width, res.overview_size.height,
+ res.refined_size.width, res.refined_size.height,
+ res.grid_size.width, res.grid_size.height);
+
+ int width = refine_size.width;
+ int height = refine_size.height;
+ int grid_x = int(width / best_grid.width);
+ int grid_y = int(height / best_grid.height);
+ for (int patches_y = 0, ic = 0;
+ patches_y < refine_size.height && ic < best_grid.height;
+ patches_y += grid_y, ic += 1) {
+ for (int patches_x = 0, jc = 0;
+ patches_x < refine_size.width && jc < best_grid.width;
+ patches_x += grid_x, jc += 1) {
+ slice_coordinates slice;
+ slice.x = patches_x;
+ slice.y = patches_y;
+ slice.size.width = grid_x;
+ slice.size.height = grid_y;
+ res.slices.push_back(slice);
+ LOG_DBG("%s: slice %d: x=%d, y=%d, size=%dx%d\n",
+ __func__, (int)res.slices.size() - 1,
+ slice.x, slice.y, slice.size.width, slice.size.height);
+ }
+ }
+ }
+
+ return res;
+ }
+
+ static std::vector<clip_image_u8_ptr> slice_image(const clip_image_u8 * img, const slice_instructions & inst) {
+ std::vector<clip_image_u8_ptr> output;
+
+ // resize to overview size
+ clip_image_u8_ptr resized_img(clip_image_u8_init());
+ image_manipulation::bicubic_resize(*img, *resized_img, inst.overview_size.width, inst.overview_size.height);
+ output.push_back(std::move(resized_img));
+ if (inst.slices.empty()) {
+ // no slices, just return the resized image
+ return output;
+ }
+
+ // resize to refined size
+ clip_image_u8_ptr refined_img(clip_image_u8_init());
+ if (inst.padding_refined) {
+ image_manipulation::resize_and_pad_image(*img, *refined_img, inst.refined_size);
+ } else {
+ image_manipulation::bilinear_resize(*img, *refined_img, inst.refined_size.width, inst.refined_size.height);
+ }
+
+ // create slices
+ for (const auto & slice : inst.slices) {
+ int x = slice.x;
+ int y = slice.y;
+ int w = slice.size.width;
+ int h = slice.size.height;
+
+ clip_image_u8_ptr img_slice(clip_image_u8_init());
+ image_manipulation::crop_image(*refined_img, *img_slice, x, y, w, h);
+ output.push_back(std::move(img_slice));
+ }
+
+ return output;
+ }
+
+ private:
+ static clip_image_size get_best_resize(const clip_image_size & original_size, int scale_resolution, int patch_size, bool allow_upscale = false) {
+ int width = original_size.width;
+ int height = original_size.height;
+ if ((width * height > scale_resolution * scale_resolution) || allow_upscale) {
+ float r = static_cast<float>(width) / height;
+ height = static_cast<int>(scale_resolution / std::sqrt(r));
+ width = static_cast<int>(height * r);
+ }
+ clip_image_size res;
+ res.width = ensure_divide(width, patch_size);
+ res.height = ensure_divide(height, patch_size);
+ return res;
+ }
+
+ static clip_image_size resize_maintain_aspect_ratio(const clip_image_size & orig, const clip_image_size & target_max) {
+ float scale_width = static_cast<float>(target_max.width) / orig.width;
+ float scale_height = static_cast<float>(target_max.height) / orig.height;
+ float scale = std::min(scale_width, scale_height);
+ return clip_image_size{
+ static_cast<int>(orig.width * scale),
+ static_cast<int>(orig.height * scale),
+ };
+ }
+
+ /**
+ * Selects the best resolution from a list of possible resolutions based on the original size.
+ *
+ * For example, when given a list of resolutions:
+ * - 100x100
+ * - 200x100
+ * - 100x200
+ * - 200x200
+ *
+ * And an input image of size 111x200, then 100x200 is the best fit (least wasted resolution).
+ *
+ * @param original_size The original size of the image
+ * @param possible_resolutions A list of possible resolutions
+ * @return The best fit resolution
+ */
+ static clip_image_size select_best_resolution(const clip_image_size & original_size, const std::vector<clip_image_size> & possible_resolutions) {
+ clip_image_size best_fit;
+ int min_wasted_area = std::numeric_limits<int>::max();
+ int max_effective_resolution = 0;
+
+ for (const clip_image_size & candidate : possible_resolutions) {
+ auto target_size = resize_maintain_aspect_ratio(original_size, candidate);
+ int effective_resolution = std::min(
+ target_size.width * target_size.height,
+ original_size.width * original_size.height);
+ int wasted_area = (candidate.width * candidate.height) - effective_resolution;
+
+ if (effective_resolution > max_effective_resolution || (effective_resolution == max_effective_resolution && wasted_area < min_wasted_area)) {
+ max_effective_resolution = effective_resolution;
+ min_wasted_area = wasted_area;
+ best_fit = candidate;
+ }
+
+ LOG_DBG("%s: candidate: %d x %d, target: %d x %d, wasted: %d, effective: %d\n", __func__, candidate.width, candidate.height, target_size.width, target_size.height, wasted_area, effective_resolution);
+ }
+
+ return best_fit;
+ }
+
+ static int ensure_divide(int length, int patch_size) {
+ return std::max(static_cast<int>(std::round(static_cast<float>(length) / patch_size) * patch_size), patch_size);
+ }
+
+ static clip_image_size get_refine_size(const clip_image_size & original_size, const clip_image_size & grid, int scale_resolution, int patch_size, bool allow_upscale = false) {
+ int width = original_size.width;
+ int height = original_size.height;
+ int grid_x = grid.width;
+ int grid_y = grid.height;
+
+ int refine_width = ensure_divide(width, grid_x);
+ int refine_height = ensure_divide(height, grid_y);
+
+ clip_image_size grid_size;
+ grid_size.width = refine_width / grid_x;
+ grid_size.height = refine_height / grid_y;
+
+ auto best_grid_size = get_best_resize(grid_size, scale_resolution, patch_size, allow_upscale);
+ int best_grid_width = best_grid_size.width;
+ int best_grid_height = best_grid_size.height;
+
+ clip_image_size refine_size;
+ refine_size.width = best_grid_width * grid_x;
+ refine_size.height = best_grid_height * grid_y;
+ return refine_size;
+ }
+
+ static clip_image_size get_best_grid(const int max_slice_nums, const int multiple, const float log_ratio) {
+ std::vector<int> candidate_split_grids_nums;
+ for (int i : {multiple - 1, multiple, multiple + 1}) {
+ if (i == 1 || i > max_slice_nums) {
+ continue;
+ }
+ candidate_split_grids_nums.push_back(i);
+ }
+
+ std::vector<clip_image_size> candidate_grids;
+ for (int split_grids_nums : candidate_split_grids_nums) {
+ int m = 1;
+ while (m <= split_grids_nums) {
+ if (split_grids_nums % m == 0) {
+ candidate_grids.push_back(clip_image_size{m, split_grids_nums / m});
+ }
+ ++m;
+ }
+ }
+
+ clip_image_size best_grid{1, 1};
+ float min_error = std::numeric_limits<float>::infinity();
+ for (const auto& grid : candidate_grids) {
+ float error = std::abs(log_ratio - std::log(1.0 * grid.width / grid.height));
+ if (error < min_error) {
+ best_grid = grid;
+ min_error = error;
+ }
+ }
+ return best_grid;
+ }
+ };
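To make the two paths above concrete (sizes illustrative): with slice_size = 448, a 1344×896 input whose refined size comes out as 1344×896 yields one 448×448 overview plus a 3×2 grid of six 448×448 slices, whereas any input not exceeding 448 in either dimension produces only the overview and no slices.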
+
3431
+ // returns the normalized float tensor for llava-1.5, for spatial_unpad with anyres processing for llava-1.6 it returns the normalized image patch tensors as a vector
3432
+ // res_imgs memory is being allocated here, previous allocations will be freed if found
3433
+ bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, struct clip_image_f32_batch * res_imgs) {
3434
+ clip_image_size original_size{img->nx, img->ny};
3435
+ bool pad_to_square = true;
3436
+ auto & params = ctx->model.hparams;
3437
+ // The model config actually contains all we need to decide on how to preprocess, here we automatically switch to the new llava-1.6 preprocessing
3438
+ if (params.mm_patch_merge_type == PATCH_MERGE_SPATIAL_UNPAD) {
3439
+ pad_to_square = false;
3440
+ }
3441
+
3442
+ if (clip_is_minicpmv(ctx)) {
3443
+ auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
3444
+ std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);
3445
+
3446
+ for (size_t i = 0; i < imgs.size(); ++i) {
3447
+ // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp");
3448
+ clip_image_f32_ptr res(clip_image_f32_init());
3449
+ normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std);
3450
+ res_imgs->entries.push_back(std::move(res));
3451
+ }
3452
+
3453
+ res_imgs->grid_x = inst.grid_size.width;
3454
+ res_imgs->grid_y = inst.grid_size.height;
3455
+ return true;
3456
+
3457
+ } else if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL) {
3458
+ clip_image_u8 resized;
3459
+ auto patch_size = params.patch_size * 2;
3460
+ auto new_size = image_manipulation::calc_size_preserved_ratio(original_size, patch_size, params.image_size);
3461
+ image_manipulation::bicubic_resize(*img, resized, new_size.width, new_size.height);
3462
+
3463
+ clip_image_f32_ptr img_f32(clip_image_f32_init());
3464
+ // clip_image_f32_ptr res(clip_image_f32_init());
3465
+ normalize_image_u8_to_f32(resized, *img_f32, params.image_mean, params.image_std);
3466
+ // res_imgs->data[0] = *res;
3467
+ res_imgs->entries.push_back(std::move(img_f32));
3468
+ return true;
3469
+ }
3470
+ else if (ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE
3471
+ || ctx->proj_type() == PROJECTOR_TYPE_GEMMA3
3472
+ || ctx->proj_type() == PROJECTOR_TYPE_IDEFICS3
3473
+ || ctx->proj_type() == PROJECTOR_TYPE_INTERNVL // TODO @ngxson : support dynamic resolution
3474
+ ) {
3475
+ clip_image_u8 resized_image;
3476
+ int sz = params.image_size;
3477
+ image_manipulation::resize_and_pad_image(*img, resized_image, {sz, sz});
3478
+ clip_image_f32_ptr img_f32(clip_image_f32_init());
3479
+ //clip_image_save_to_bmp(resized_image, "resized.bmp");
3480
+ normalize_image_u8_to_f32(resized_image, *img_f32, params.image_mean, params.image_std);
3481
+ res_imgs->entries.push_back(std::move(img_f32));
3482
+ return true;
3483
+
3484
+ } else if (ctx->proj_type() == PROJECTOR_TYPE_PIXTRAL) {
3485
+ clip_image_u8 resized_image;
3486
+ auto new_size = image_manipulation::calc_size_preserved_ratio(original_size, params.patch_size, params.image_size);
3487
+ image_manipulation::bilinear_resize(*img, resized_image, new_size.width, new_size.height);
3488
+ clip_image_f32_ptr img_f32(clip_image_f32_init());
3489
+ normalize_image_u8_to_f32(resized_image, *img_f32, params.image_mean, params.image_std);
3490
+ res_imgs->entries.push_back(std::move(img_f32));
3491
+ return true;
3492
+
3493
+ } else if (ctx->proj_type() == PROJECTOR_TYPE_LLAMA4) {
3494
+ LM_GGML_ASSERT(!params.image_res_candidates.empty());
3495
+ auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
3496
+ std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);
3497
+
3498
+ for (size_t i = 0; i < imgs.size(); ++i) {
3499
+ clip_image_f32_ptr res(clip_image_f32_init());
3500
+ normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std);
3501
+ res_imgs->entries.push_back(std::move(res));
3502
+ }
3503
+
3504
+ res_imgs->grid_x = inst.grid_size.width;
3505
+ res_imgs->grid_y = inst.grid_size.height;
3506
+ return true;
3507
+
3508
+ } else if (ctx->proj_type() == PROJECTOR_TYPE_LFM2) {
3509
+ LM_GGML_ASSERT(params.proj_scale_factor);
3510
+
3511
+ // smart resize
3512
+ const int width = img->nx;
3513
+ const int height = img->ny;
3514
+ const int total_factor = params.patch_size * params.proj_scale_factor;
3515
+ constexpr int min_image_tokens = 64;
3516
+ constexpr int max_image_tokens = 1024;
3517
+ const float min_pixels = min_image_tokens * total_factor * total_factor;
3518
+ const float max_pixels = max_image_tokens * total_factor * total_factor;
3519
+
3520
+ auto round_by_factor = [f = total_factor](float x) { return static_cast<int>(std::nearbyintf(x / static_cast<float>(f))) * f; };
3521
+ auto ceil_by_factor = [f = total_factor](float x) { return static_cast<int>(std::ceil(x / static_cast<float>(f))) * f; };
3522
+ auto floor_by_factor = [f = total_factor](float x) { return static_cast<int>(std::floor(x / static_cast<float>(f))) * f; };
3523
+
3524
+ int h_bar = std::max(total_factor, round_by_factor(height));
3525
+ int w_bar = std::max(total_factor, round_by_factor(width));
3526
+
3527
+ if (h_bar * w_bar > max_pixels) {
3528
+ const auto beta = std::sqrt((height * width) / max_pixels);
3529
+ h_bar = std::max(total_factor, floor_by_factor(height / beta));
3530
+ w_bar = std::max(total_factor, floor_by_factor(width / beta));
3531
+ } else if (h_bar * w_bar < min_pixels) {
3532
+ const auto beta = std::sqrt(min_pixels / (height * width));
3533
+ h_bar = ceil_by_factor(height * beta);
3534
+ w_bar = ceil_by_factor(width * beta);
3535
+ }
3536
+
3537
+ const std::array<uint8_t, 3> pad_color = {122, 116, 104};
3538
+
3539
+ clip_image_u8 resized_img;
3540
+ image_manipulation::resize_and_pad_image(*img, resized_img, clip_image_size{w_bar, h_bar}, pad_color);
3541
+ clip_image_f32_ptr res(clip_image_f32_init());
3542
+ normalize_image_u8_to_f32(resized_img, *res, params.image_mean, params.image_std);
3543
+ res_imgs->entries.push_back(std::move(res));
3544
+ return true;
3545
+ }
3546
+
3547
+ // the logic below is to pad the shorter side to the longer side with a background color: rgb(122, 116, 104)
3548
+ // see https://github.com/haotian-liu/LLaVA/blob/e854a2bf85118c504f6f16bf5c3c7c92f8fa8c6b/llava/conversation.py#L113-L156
3549
+
3550
+ clip_image_u8_ptr temp(clip_image_u8_init()); // we will keep the input image data here temporarily
3551
+
3552
+ if (pad_to_square) {
3553
+ // for llava-1.5, we resize image to a square, and pad the shorter side with a background color
3554
+ // see https://github.com/haotian-liu/LLaVA/blob/e854a2bf85118c504f6f16bf5c3c7c92f8fa8c6b/llava/conversation.py#L113-L156
3555
+ const int longer_side = std::max(img->nx, img->ny);
3556
+ temp->nx = longer_side;
3557
+ temp->ny = longer_side;
3558
+ temp->buf.resize(3 * longer_side * longer_side);
3559
+
3560
+ // background color in RGB from LLaVA (this is the mean rgb color * 255)
3561
+ const std::array<uint8_t, 3> pad_color = {122, 116, 104};
3562
+
3563
+ // resize the image to the target_size
3564
+ image_manipulation::resize_and_pad_image(*img, *temp, clip_image_size{params.image_size, params.image_size}, pad_color);
3565
+
3566
+ clip_image_f32_ptr res(clip_image_f32_init());
3567
+ normalize_image_u8_to_f32(*temp, *res, params.image_mean, params.image_std);
3568
+ res_imgs->entries.push_back(std::move(res));
3569
+ return true;
3570
+
3571
+ } else if (!params.image_res_candidates.empty()) {
3572
+ // "spatial_unpad" with "anyres" processing for llava-1.6
3573
+ auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
3574
+ std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);
3575
+
3576
+ for (size_t i = 0; i < imgs.size(); ++i) {
3577
+ // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp");
3578
+ clip_image_f32_ptr res(clip_image_f32_init());
3579
+ normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std);
3580
+ res_imgs->entries.push_back(std::move(res));
3581
+ }
3582
+
3583
+ return true;
3584
+ } else {
3585
+ LM_GGML_ABORT("Unknown image preprocessing type");
3586
+ }
3587
+
3588
+ }
3589
+
3590
+ lm_ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx) {
3591
+ return ctx->model.image_newline;
3592
+ }
3593
+
3594
+ void clip_free(clip_ctx * ctx) {
3595
+ if (ctx == nullptr) {
3596
+ return;
3597
+ }
3598
+ delete ctx;
3599
+ }
3600
+
3601
+ // deprecated
3602
+ size_t clip_embd_nbytes(const struct clip_ctx * ctx) {
3603
+ const int32_t nx = ctx->model.hparams.image_size;
3604
+ const int32_t ny = ctx->model.hparams.image_size;
3605
+ return clip_embd_nbytes_by_img(ctx, nx, ny);
3606
+ }
3607
+
3608
+ size_t clip_embd_nbytes_by_img(const struct clip_ctx * ctx, int img_w, int img_h) {
3609
+ clip_image_f32 img;
3610
+ img.nx = img_w;
3611
+ img.ny = img_h;
3612
+ return clip_n_output_tokens(ctx, &img) * clip_n_mmproj_embd(ctx) * sizeof(float);
3613
+ }
3614
+
3615
+ int32_t clip_get_image_size(const struct clip_ctx * ctx) {
3616
+ return ctx->model.hparams.image_size;
3617
+ }
3618
+
3619
+ int32_t clip_get_patch_size(const struct clip_ctx * ctx) {
3620
+ return ctx->model.hparams.patch_size;
3621
+ }
3622
+
3623
+ int32_t clip_get_hidden_size(const struct clip_ctx * ctx) {
3624
+ return ctx->model.hparams.n_embd;
3625
+ }
3626
+
3627
+ const char * clip_patch_merge_type(const struct clip_ctx * ctx) {
3628
+ return ctx->model.hparams.mm_patch_merge_type == PATCH_MERGE_SPATIAL_UNPAD ? "spatial_unpad" : "flat";
3629
+ }
3630
+
3631
+ int clip_n_output_tokens_x(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
3632
+ const auto & params = ctx->model.hparams;
3633
+ const int n_total = clip_n_output_tokens(ctx, img);
3634
+ if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL) {
3635
+ return img->nx / (params.patch_size * 2) + (int)(img->nx % params.patch_size > 0);
3636
+ }
3637
+ return n_total;
3638
+ }
3639
+
3640
+ int clip_n_output_tokens_y(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
3641
+ const auto & params = ctx->model.hparams;
3642
+ if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL) {
3643
+ return img->ny / (params.patch_size * 2) + (int)(img->ny % params.patch_size > 0);
3644
+ }
3645
+ return 1;
3646
+ }
3647
+
3648
+ int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
3649
+ const auto & params = ctx->model.hparams;
3650
+
3651
+ // for models with fixed size image, the input image is already pre-processed and resized to square
3652
+ int patch_size = params.patch_size;
3653
+ int n_patches = (img->nx / patch_size) * (img->ny / patch_size);
3654
+
3655
+ projector_type proj = ctx->proj_type();
3656
+
3657
+ switch (proj) {
3658
+ case PROJECTOR_TYPE_MLP:
3659
+ case PROJECTOR_TYPE_MLP_NORM:
3660
+ {
3661
+ // do nothing
3662
+ } break;
3663
+ case PROJECTOR_TYPE_LDP:
3664
+ case PROJECTOR_TYPE_LDPV2:
3665
+ case PROJECTOR_TYPE_GLM_EDGE:
3666
+ {
3667
+ n_patches /= 4;
3668
+ if (ctx->model.mm_glm_tok_boi) {
3669
+ n_patches += 2; // for BOI and EOI token embeddings
3670
+ }
3671
+ } break;
3672
+ case PROJECTOR_TYPE_MINICPMV:
3673
+ {
3674
+ // Use actual config value if available, otherwise fall back to hardcoded values
3675
+ if (params.minicpmv_query_num > 0) {
3676
+ n_patches = params.minicpmv_query_num;
3677
+ } else {
3678
+ // Fallback to hardcoded values for legacy models
3679
+ if (params.minicpmv_version == 2) {
3680
+ n_patches = 96;
3681
+ } else if (params.minicpmv_version == 3) {
3682
+ n_patches = 64;
3683
+ } else if (params.minicpmv_version == 4) {
3684
+ n_patches = 64;
3685
+ } else if (params.minicpmv_version == 5) {
3686
+ // MiniCPM-V 4.0
3687
+ n_patches = 64;
3688
+ } else {
3689
+ LM_GGML_ABORT("Unknown minicpmv version");
3690
+ }
3691
+ }
3692
+ } break;
3693
+ case PROJECTOR_TYPE_QWEN2VL:
3694
+ case PROJECTOR_TYPE_QWEN25VL:
3695
+ {
3696
+ // dynamic size (2 conv, so double patch size)
3697
+ int patch_size = params.patch_size * 2;
3698
+ int x_patch = img->nx / patch_size + (int)(img->nx % patch_size > 0);
3699
+ int y_patch = img->ny / patch_size + (int)(img->ny % patch_size > 0);
3700
+ n_patches = x_patch * y_patch;
3701
+ } break;
3702
+ case PROJECTOR_TYPE_GEMMA3:
3703
+ case PROJECTOR_TYPE_IDEFICS3:
3704
+ case PROJECTOR_TYPE_INTERNVL:
3705
+ case PROJECTOR_TYPE_LLAMA4:
3706
+ case PROJECTOR_TYPE_LFM2:
3707
+ {
3708
+ // both W and H are divided by proj_scale_factor
3709
+ int scale_factor = ctx->model.hparams.proj_scale_factor;
3710
+ n_patches /= (scale_factor * scale_factor);
3711
+ } break;
3712
+ case PROJECTOR_TYPE_PIXTRAL:
3713
+ {
3714
+ // dynamic size
3715
+ int n_merge = params.spatial_merge_size;
3716
+ int n_patches_x = img->nx / patch_size / (n_merge > 0 ? n_merge : 1);
3717
+ int n_patches_y = img->ny / patch_size / (n_merge > 0 ? n_merge : 1);
3718
+ n_patches = n_patches_y * n_patches_x + n_patches_y - 1; // + one [IMG_BREAK] per row, except the last row
3719
+ } break;
3720
+ case PROJECTOR_TYPE_VOXTRAL:
3721
+ case PROJECTOR_TYPE_ULTRAVOX:
3722
+ case PROJECTOR_TYPE_QWEN2A:
3723
+ {
3724
+ n_patches = img->nx;
3725
+
3726
+ const int proj_stack_factor = ctx->model.hparams.proj_stack_factor;
3727
+ if (ctx->model.audio_has_stack_frames()) {
3728
+ LM_GGML_ASSERT(proj_stack_factor > 0);
3729
+ const int n_len = CLIP_ALIGN(n_patches, proj_stack_factor);
3730
+ n_patches = n_len / proj_stack_factor;
3731
+ }
3732
+
3733
+ // whisper downscales input token by half after conv1d
3734
+ n_patches /= 2;
3735
+
3736
+ if (ctx->model.audio_has_avgpool()) {
3737
+ // divide by 2 because of nn.AvgPool1d(2, stride=2)
3738
+ n_patches /= 2;
3739
+ }
3740
+ } break;
3741
+ default:
3742
+ LM_GGML_ABORT("unsupported projector type");
3743
+ }
3744
+
3745
+ return n_patches;
3746
+ }
3747
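As a sanity check of the Qwen2-VL branch above (illustrative numbers): with patch_size = 14 the effective patch becomes 28, so a 448×448 input yields x_patch = y_patch = 16 and n_patches = 256 output tokens.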
+
3748
+ static std::vector<std::vector<std::vector<float>>> get_1d_sincos_pos_embed_from_grid_new(int embed_dim, const std::vector<std::vector<float>> & pos) {
3749
+ assert(embed_dim % 2 == 0);
3750
+ int H = pos.size();
3751
+ int W = pos[0].size();
3752
+
3753
+ std::vector<float> omega(embed_dim / 2);
3754
+ for (int i = 0; i < embed_dim / 2; ++i) {
3755
+ omega[i] = 1.0 / pow(10000.0, static_cast<float>(i) / (embed_dim / 2));
3756
+ }
3757
+
3758
+ std::vector<std::vector<std::vector<float>>> emb(H, std::vector<std::vector<float>>(W, std::vector<float>(embed_dim)));
3759
+ for (int h = 0; h < H; ++h) {
3760
+ for (int w = 0; w < W; ++w) {
3761
+ for (int d = 0; d < embed_dim / 2; ++d) {
3762
+ float out_value = pos[h][w] * omega[d];
3763
+ emb[h][w][d] = sin(out_value);
3764
+ emb[h][w][d + embed_dim / 2] = cos(out_value);
3765
+ }
3766
+ }
3767
+ }
3768
+
3769
+ return emb;
3770
+ }
3771
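This is the classic transformer sinusoidal embedding applied per grid axis: with D = embed_dim, the frequencies are ω_i = 10000^(−i/(D/2)) for i = 0..D/2−1, and for a position p the output is emb[i] = sin(p·ω_i) and emb[i + D/2] = cos(p·ω_i).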
+
3772
+ static std::vector<std::vector<std::vector<float>>> get_2d_sincos_pos_embed_from_grid(int embed_dim, const std::vector<std::vector<std::vector<float>>> & grid) {
3773
+ assert(embed_dim % 2 == 0);
3774
+ std::vector<std::vector<std::vector<float>>> emb_h = get_1d_sincos_pos_embed_from_grid_new(embed_dim / 2, grid[0]); // (H, W, D/2)
3775
+ std::vector<std::vector<std::vector<float>>> emb_w = get_1d_sincos_pos_embed_from_grid_new(embed_dim / 2, grid[1]); // (H, W, D/2)
3776
+
3777
+ int H = emb_h.size();
3778
+ int W = emb_h[0].size();
3779
+ std::vector<std::vector<std::vector<float>>> emb(H, std::vector<std::vector<float>>(W, std::vector<float>(embed_dim)));
3780
+
3781
+ for (int h = 0; h < H; ++h) {
3782
+ for (int w = 0; w < W; ++w) {
3783
+ for (int d = 0; d < embed_dim / 2; ++d) {
3784
+ emb[h][w][d] = emb_h[h][w][d];
3785
+ emb[h][w][d + embed_dim / 2] = emb_w[h][w][d];
3786
+ }
3787
+ }
3788
+ }
3789
+ return emb;
3790
+ }
3791
+
3792
+ static std::vector<std::vector<float>> get_2d_sincos_pos_embed(int embed_dim, const std::pair<int, int> image_size) {
3793
+ int grid_h_size = image_size.first;
3794
+ int grid_w_size = image_size.second;
3795
+
3796
+ std::vector<float> grid_h(grid_h_size);
3797
+ std::vector<float> grid_w(grid_w_size);
3798
+
3799
+ for (int i = 0; i < grid_h_size; ++i) {
3800
+ grid_h[i] = static_cast<float>(i);
3801
+ }
3802
+ for (int i = 0; i < grid_w_size; ++i) {
3803
+ grid_w[i] = static_cast<float>(i);
3804
+ }
3805
+
3806
+ std::vector<std::vector<float>> grid(grid_h_size, std::vector<float>(grid_w_size));
3807
+ for (int h = 0; h < grid_h_size; ++h) {
3808
+ for (int w = 0; w < grid_w_size; ++w) {
3809
+ grid[h][w] = grid_w[w];
3810
+ }
3811
+ }
3812
+ std::vector<std::vector<std::vector<float>>> grid_2d = {grid, grid};
3813
+ for (int h = 0; h < grid_h_size; ++h) {
3814
+ for (int w = 0; w < grid_w_size; ++w) {
3815
+ grid_2d[0][h][w] = grid_h[h];
3816
+ grid_2d[1][h][w] = grid_w[w];
3817
+ }
3818
+ }
3819
+
3820
+ std::vector<std::vector<std::vector<float>>> pos_embed_3d = get_2d_sincos_pos_embed_from_grid(embed_dim, grid_2d);
3821
+
3822
+ int H = image_size.first;
3823
+ int W = image_size.second;
3824
+ std::vector<std::vector<float>> pos_embed_2d(H * W, std::vector<float>(embed_dim));
3825
+ for (int h = 0; h < H; ++h) {
3826
+ for (int w = 0; w < W; ++w) {
3827
+ pos_embed_2d[w * H + h] = pos_embed_3d[h][w];
3828
+ }
3829
+ }
3830
+
3831
+ return pos_embed_2d;
3832
+ }
3833
+
3834
+ bool clip_image_encode(struct clip_ctx * ctx, const int n_threads, clip_image_f32 * img, float * vec) {
3835
+ clip_image_f32_batch imgs;
3836
+ clip_image_f32_ptr img_copy(clip_image_f32_init());
3837
+ *img_copy = *img;
3838
+ imgs.entries.push_back(std::move(img_copy));
3839
+
3840
+ return clip_image_batch_encode(ctx, n_threads, &imgs, vec);
3841
+ }
3842
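A minimal end-to-end sketch tying the preprocessing and encoding APIs together (assumes ctx came from clip_init and img was filled via clip_build_img_from_pixels; the thread count is arbitrary):

    // Illustrative only; error handling trimmed.
    clip_image_f32_batch batch;
    if (clip_image_preprocess(ctx, img, &batch)) {
        clip_image_f32 * f32 = clip_image_f32_get_img(&batch, 0);
        std::vector<float> embd(clip_n_output_tokens(ctx, f32) * clip_n_mmproj_embd(ctx));
        clip_image_encode(ctx, /*n_threads=*/4, f32, embd.data()); // one image per call
    }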
+
3843
+ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_image_f32_batch * imgs_c_ptr, float * vec) {
3844
+ const clip_image_f32_batch & imgs = *imgs_c_ptr;
3845
+ int batch_size = imgs.entries.size();
3846
+
3847
+ // TODO @ngxson : implement batch size > 1 as a loop
3848
+ // we don't need true batching support because the cgraph will gonna be big anyway
3849
+ if (batch_size != 1) {
3850
+ return false; // only support batch size of 1
3851
+ }
3852
+
3853
+ // build the inference graph
3854
+ ctx->debug_print_tensors.clear();
3855
+ lm_ggml_backend_sched_reset(ctx->sched.get());
3856
+ lm_ggml_cgraph * gf = clip_image_build_graph(ctx, imgs);
3857
+ lm_ggml_backend_sched_alloc_graph(ctx->sched.get(), gf);
3858
+
3859
+ // set inputs
3860
+ const auto & model = ctx->model;
3861
+ const auto & hparams = model.hparams;
3862
+
3863
+ const int image_size_width = imgs.entries[0]->nx;
3864
+ const int image_size_height = imgs.entries[0]->ny;
3865
+
3866
+ const int patch_size = hparams.patch_size;
3867
+ const int num_patches = ((image_size_width / patch_size) * (image_size_height / patch_size));
3868
+ const int n_pos = num_patches + (model.class_embedding ? 1 : 0);
3869
+ const int pos_w = image_size_width / patch_size;
3870
+ const int pos_h = image_size_height / patch_size;
3871
+
3872
+ const bool use_window_attn = hparams.n_wa_pattern > 0; // for qwen2.5vl
3873
+
3874
+ auto get_inp_tensor = [&gf](const char * name) {
3875
+ lm_ggml_tensor * inp = lm_ggml_graph_get_tensor(gf, name);
3876
+ if (inp == nullptr) {
3877
+ LM_GGML_ABORT("Failed to get tensor %s", name);
3878
+ }
3879
+ if (!(inp->flags & LM_GGML_TENSOR_FLAG_INPUT)) {
3880
+ LM_GGML_ABORT("Tensor %s is not an input tensor", name);
3881
+ }
3882
+ return inp;
3883
+ };
3884
+
3885
+ auto set_input_f32 = [&get_inp_tensor](const char * name, std::vector<float> & values) {
3886
+ lm_ggml_tensor * cur = get_inp_tensor(name);
3887
+ LM_GGML_ASSERT(cur->type == LM_GGML_TYPE_F32);
3888
+ LM_GGML_ASSERT(lm_ggml_nelements(cur) == (int64_t)values.size());
3889
+ lm_ggml_backend_tensor_set(cur, values.data(), 0, lm_ggml_nbytes(cur));
3890
+ };
3891
+
3892
+ auto set_input_i32 = [&get_inp_tensor](const char * name, std::vector<int32_t> & values) {
3893
+ lm_ggml_tensor * cur = get_inp_tensor(name);
3894
+ LM_GGML_ASSERT(cur->type == LM_GGML_TYPE_I32);
3895
+ LM_GGML_ASSERT(lm_ggml_nelements(cur) == (int64_t)values.size());
3896
+ lm_ggml_backend_tensor_set(cur, values.data(), 0, lm_ggml_nbytes(cur));
3897
+ };
3898
+
3899
+ // set input pixel values
3900
+ if (!imgs.is_audio) {
3901
+ size_t nelem = 0;
3902
+ for (const auto & img : imgs.entries) {
3903
+ nelem += img->nx * img->ny * 3;
3904
+ }
3905
+ std::vector<float> inp_raw(nelem);
3906
+
3907
+ // layout of data (note: the channel dim is unrolled to better visualize the layout):
3908
+ //
3909
+ // ┌──W──┐
3910
+ // │ H │ channel = R
3911
+ // ├─────┤ │
3912
+ // │ H │ channel = G
3913
+ // ├─────┤ │
3914
+ // │ H │ channel = B
3915
+ // └─────┘ │
3916
+ // ──────┘ x B
3917
+
3918
+ for (size_t i = 0; i < imgs.entries.size(); i++) {
3919
+ const int nx = imgs.entries[i]->nx;
3920
+ const int ny = imgs.entries[i]->ny;
3921
+ const int n = nx * ny;
3922
+
3923
+ for (int b = 0; b < batch_size; b++) {
3924
+ float * batch_entry = inp_raw.data() + b * (3*n);
3925
+ for (int y = 0; y < ny; y++) {
3926
+ for (int x = 0; x < nx; x++) {
3927
+ size_t base_src = 3*(y * nx + x); // idx of the first channel
3928
+ size_t base_dst = y * nx + x; // idx of the first channel
3929
+ batch_entry[ base_dst] = imgs.entries[b]->buf[base_src ];
3930
+ batch_entry[1*n + base_dst] = imgs.entries[b]->buf[base_src + 1];
3931
+ batch_entry[2*n + base_dst] = imgs.entries[b]->buf[base_src + 2];
3932
+ }
3933
+ }
3934
+ }
3935
+ }
3936
+ set_input_f32("inp_raw", inp_raw);
3937
+
3938
+ } else {
3939
+ // audio input
3940
+ LM_GGML_ASSERT(imgs.entries.size() == 1);
3941
+ const auto & mel_inp = imgs.entries[0];
3942
+ const int n_step = mel_inp->nx;
3943
+ const int n_mel = mel_inp->ny;
3944
+ std::vector<float> inp_raw(n_step * n_mel);
3945
+ std::memcpy(inp_raw.data(), mel_inp->buf.data(), n_step * n_mel * sizeof(float));
3946
+ set_input_f32("inp_raw", inp_raw);
3947
+ }
3948
+
3949
+ // set input per projector
3950
+ switch (ctx->model.proj_type) {
3951
+ case PROJECTOR_TYPE_MINICPMV:
3952
+ {
3953
+ // inspired from siglip:
3954
+ // -> https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit
3955
+ // -> https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit/blob/d66538faeba44480d0bfaa42145eef26f9423199/modeling_siglip.py#L316
3956
+ std::vector<int32_t> positions(pos_h * pos_w);
3957
+ int bucket_coords_h[1024];
3958
+ int bucket_coords_w[1024];
3959
+ for (int i = 0; i < pos_h; i++){
3960
+ bucket_coords_h[i] = std::floor(70.0*i/pos_h);
3961
+ }
3962
+ for (int i = 0; i < pos_w; i++){
3963
+ bucket_coords_w[i] = std::floor(70.0*i/pos_w);
3964
+ }
3965
+ for (int i = 0, id = 0; i < pos_h; i++){
3966
+ for (int j = 0; j < pos_w; j++){
3967
+ positions[id++] = bucket_coords_h[i]*70 + bucket_coords_w[j];
3968
+ }
3969
+ }
3970
+ set_input_i32("positions", positions);
3971
+
3972
+ // inspired from resampler of Qwen-VL:
3973
+ // -> https://huggingface.co/Qwen/Qwen-VL/tree/main
3974
+ // -> https://huggingface.co/Qwen/Qwen-VL/blob/0547ed36a86561e2e42fecec8fd0c4f6953e33c4/visual.py#L23
3975
+ int embed_dim = clip_n_mmproj_embd(ctx);
3976
+
3977
+ // TODO @ngxson : this is very inefficient, can we do this using lm_ggml_sin and lm_ggml_cos?
3978
+ auto pos_embed_t = get_2d_sincos_pos_embed(embed_dim, std::make_pair(pos_w, pos_h));
3979
+
3980
+ std::vector<float> pos_embed(embed_dim * pos_w * pos_h);
3981
+ for(int i = 0; i < pos_w * pos_h; ++i){
3982
+ for(int j = 0; j < embed_dim; ++j){
3983
+ pos_embed[i * embed_dim + j] = pos_embed_t[i][j];
3984
+ }
3985
+ }
3986
+
3987
+ set_input_f32("pos_embed", pos_embed);
3988
+ } break;
3989
+ case PROJECTOR_TYPE_QWEN2VL:
3990
+ {
3991
+ const int merge_ratio = 2;
3992
+ const int pw = image_size_width / patch_size;
3993
+ const int ph = image_size_height / patch_size;
3994
+ std::vector<int> positions(n_pos * 4);
3995
+ int ptr = 0;
3996
+ for (int y = 0; y < ph; y += merge_ratio) {
3997
+ for (int x = 0; x < pw; x += merge_ratio) {
3998
+ for (int dy = 0; dy < 2; dy++) {
3999
+ for (int dx = 0; dx < 2; dx++) {
4000
+ positions[ ptr] = y + dy;
4001
+ positions[ num_patches + ptr] = x + dx;
4002
+ positions[2 * num_patches + ptr] = y + dy;
4003
+ positions[3 * num_patches + ptr] = x + dx;
4004
+ ptr++;
4005
+ }
4006
+ }
4007
+ }
4008
+ }
4009
+
4010
+ set_input_i32("positions", positions);
4011
+ } break;
4012
+        case PROJECTOR_TYPE_QWEN25VL:
+            {
+                // pw * ph   = number of tokens output by the ViT after applying the patch merger
+                // ipw * iph = number of vision tokens processed inside the ViT
+                const int merge_ratio = 2;
+                const int pw  = image_size_width  / patch_size / merge_ratio;
+                const int ph  = image_size_height / patch_size / merge_ratio;
+                const int ipw = image_size_width  / patch_size;
+                const int iph = image_size_height / patch_size;
+
+                std::vector<int> idx    (ph * pw);
+                std::vector<int> inv_idx(ph * pw);
+
+                if (use_window_attn) {
+                    const int attn_window_size = 112;
+                    const int grid_window = attn_window_size / patch_size / merge_ratio;
+                    int dst = 0;
+                    // [num_vision_tokens, num_vision_tokens] attention mask tensor
+                    std::vector<float> mask(pow(ipw * iph, 2), std::numeric_limits<float>::lowest());
+                    int mask_row = 0;
+
+                    for (int y = 0; y < ph; y += grid_window) {
+                        for (int x = 0; x < pw; x += grid_window) {
+                            const int win_h = std::min(grid_window, ph - y);
+                            const int win_w = std::min(grid_window, pw - x);
+                            const int dst_0 = dst;
+                            // group all tokens belonging to the same window together (into a contiguous range)
+                            for (int dy = 0; dy < win_h; dy++) {
+                                for (int dx = 0; dx < win_w; dx++) {
+                                    const int src = (y + dy) * pw + (x + dx);
+                                    LM_GGML_ASSERT(src < (int)idx.size());
+                                    LM_GGML_ASSERT(dst < (int)inv_idx.size());
+                                    idx    [src] = dst;
+                                    inv_idx[dst] = src;
+                                    dst++;
+                                }
+                            }
+
+                            for (int r = 0; r < win_h * win_w * merge_ratio * merge_ratio; r++) {
+                                int row_offset = mask_row * (ipw * iph);
+                                std::fill(
+                                    mask.begin() + row_offset + (dst_0 * merge_ratio * merge_ratio),
+                                    mask.begin() + row_offset + (dst   * merge_ratio * merge_ratio),
+                                    0.0);
+                                mask_row++;
+                            }
+                        }
+                    }
+
+                    set_input_i32("window_idx",     idx);
+                    set_input_i32("inv_window_idx", inv_idx);
+                    set_input_f32("window_mask",    mask);
+                } else {
+                    for (int i = 0; i < ph * pw; i++) {
+                        idx[i] = i;
+                    }
+                }
+
+                const int mpow = merge_ratio * merge_ratio;
+                std::vector<int> positions(n_pos * 4);
+
+                int ptr = 0;
+                for (int y = 0; y < iph; y += merge_ratio) {
+                    for (int x = 0; x < ipw; x += merge_ratio) {
+                        for (int dy = 0; dy < 2; dy++) {
+                            for (int dx = 0; dx < 2; dx++) {
+                                auto remap = idx[ptr / mpow];
+                                remap = (remap * mpow) + (ptr % mpow);
+
+                                positions[                  remap] = y + dy;
+                                positions[    num_patches + remap] = x + dx;
+                                positions[2 * num_patches + remap] = y + dy;
+                                positions[3 * num_patches + remap] = x + dx;
+                                ptr++;
+                            }
+                        }
+                    }
+                }
+
+                set_input_i32("positions", positions);
+            } break;
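In the QWEN25VL branch, idx and inv_idx form a permutation and its inverse: idx[src] says where spatial token src lands after window grouping, and inv_idx[dst] recovers the original index. The mask then opens (fills with 0.0) exactly the contiguous column range belonging to each window on the rows of that window, leaving everything else at lowest(). The minimal standalone check below exercises the inverse-permutation invariant those loops maintain; the idx values are hypothetical.

// Minimal check that inv_idx is the inverse of idx, the invariant the grouping
// loops above maintain. The idx values here are hypothetical.
#include <cassert>
#include <vector>

int main() {
    const std::vector<int> idx = {0, 1, 4, 5, 2, 3, 6, 7}; // hypothetical window grouping
    std::vector<int> inv_idx(idx.size());
    for (size_t src = 0; src < idx.size(); src++) {
        inv_idx[idx[src]] = (int) src;
    }
    for (size_t src = 0; src < idx.size(); src++) {
        assert((size_t) inv_idx[idx[src]] == src); // round-trips back to src
    }
    return 0;
}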
+        case PROJECTOR_TYPE_PIXTRAL:
+            {
+                // set the 2D positions
+                int n_patches_per_col = image_size_width / patch_size;
+                std::vector<int> pos_data(n_pos);
+                // dimension H
+                for (int i = 0; i < n_pos; i++) {
+                    pos_data[i] = i / n_patches_per_col;
+                }
+                set_input_i32("pos_h", pos_data);
+                // dimension W
+                for (int i = 0; i < n_pos; i++) {
+                    pos_data[i] = i % n_patches_per_col;
+                }
+                set_input_i32("pos_w", pos_data);
+            } break;
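The PIXTRAL branch encodes each patch's row and column separately. As a worked example: with n_patches_per_col = 3 and n_pos = 6, pos_h comes out as 0, 0, 0, 1, 1, 1 and pos_w as 0, 1, 2, 0, 1, 2, i.e. the row-major row and column index of each patch.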
+        case PROJECTOR_TYPE_GLM_EDGE:
+            {
+                // llava and other models
+                std::vector<int32_t> positions(n_pos);
+                for (int i = 0; i < n_pos; i++) {
+                    positions[i] = i;
+                }
+                set_input_i32("positions", positions);
+            } break;
+        case PROJECTOR_TYPE_MLP:
+        case PROJECTOR_TYPE_MLP_NORM:
+        case PROJECTOR_TYPE_LDP:
+        case PROJECTOR_TYPE_LDPV2:
+            {
+                // llava and other models
+                std::vector<int32_t> positions(n_pos);
+                for (int i = 0; i < n_pos; i++) {
+                    positions[i] = i;
+                }
+                set_input_i32("positions", positions);
+
+                // The patches vector is used to get rows to index into the embeds with;
+                // we should skip dim 0 only if we have CLS to avoid going out of bounds
+                // when retrieving the rows.
+                int patch_offset = model.class_embedding ? 1 : 0;
+                std::vector<int32_t> patches(num_patches);
+                for (int i = 0; i < num_patches; i++) {
+                    patches[i] = i + patch_offset;
+                }
+                set_input_i32("patches", patches);
+            } break;
+        case PROJECTOR_TYPE_GEMMA3:
+        case PROJECTOR_TYPE_IDEFICS3:
+        case PROJECTOR_TYPE_INTERNVL:
+        case PROJECTOR_TYPE_QWEN2A:
+        case PROJECTOR_TYPE_ULTRAVOX:
+        case PROJECTOR_TYPE_LFM2:
+        case PROJECTOR_TYPE_VOXTRAL:
+            {
+                // do nothing
+            } break;
+        case PROJECTOR_TYPE_LLAMA4:
+            {
+                // set the 2D positions
+                int n_patches_per_col = image_size_width / patch_size;
+                std::vector<int> pos_data(num_patches + 1, 0); // +1 for the [CLS] token
+                // the last pos is always kept at 0, it is for the [CLS] token
+                // dimension H
+                for (int i = 0; i < num_patches; i++) {
+                    pos_data[i] = (i / n_patches_per_col) + 1;
+                }
+                set_input_i32("pos_h", pos_data);
+                // dimension W
+                for (int i = 0; i < num_patches; i++) {
+                    pos_data[i] = (i % n_patches_per_col) + 1;
+                }
+                set_input_i32("pos_w", pos_data);
+            } break;
+        default:
+            LM_GGML_ABORT("Unknown projector type");
+    }
+
+    // lm_ggml_backend_cpu_set_n_threads(ctx->backend_cpu, n_threads);
+    lm_ggml_backend_dev_t dev = lm_ggml_backend_get_device(ctx->backend_cpu);
+    lm_ggml_backend_reg_t reg = dev ? lm_ggml_backend_dev_backend_reg(dev) : nullptr;
+    if (reg) {
+        auto lm_ggml_backend_set_n_threads_fn = (lm_ggml_backend_set_n_threads_t) lm_ggml_backend_reg_get_proc_address(reg, "lm_ggml_backend_set_n_threads");
+        if (lm_ggml_backend_set_n_threads_fn) {
+            lm_ggml_backend_set_n_threads_fn(ctx->backend_cpu, n_threads);
+        }
+    }
+
+    auto status = lm_ggml_backend_sched_graph_compute(ctx->sched.get(), gf);
+    if (status != LM_GGML_STATUS_SUCCESS) {
+        LOG_ERR("%s: lm_ggml_backend_sched_graph_compute failed with error %d\n", __func__, status);
+        return false;
+    }
+
+    // print debug nodes
+    if (ctx->debug_graph) {
+        LOG_INF("\n\n---\n\n");
+        LOG_INF("\n\nDebug graph:\n\n");
+        for (lm_ggml_tensor * t : ctx->debug_print_tensors) {
+            std::vector<uint8_t> data(lm_ggml_nbytes(t));
+            lm_ggml_backend_tensor_get(t, data.data(), 0, lm_ggml_nbytes(t));
+            print_tensor_shape(t);
+            print_tensor_data(t, data.data(), 3);
+        }
+    }
+
+    // the last node is the embedding tensor
+    lm_ggml_tensor * embeddings = lm_ggml_graph_node(gf, -1);
+
+    // sanity check (only batch size 1 is supported for now)
+    const int n_tokens_out          = embeddings->ne[1];
+    const int expected_n_tokens_out = clip_n_output_tokens(ctx, imgs.entries[0].get());
+    if (n_tokens_out != expected_n_tokens_out) {
+        LOG_ERR("%s: expected output %d tokens, got %d\n", __func__, expected_n_tokens_out, n_tokens_out);
+        LM_GGML_ABORT("Invalid number of output tokens");
+    }
+
+    // copy the embeddings to the location passed by the user
+    lm_ggml_backend_tensor_get(embeddings, vec, 0, lm_ggml_nbytes(embeddings));
+
+    return true;
+}
+
+int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
+    switch (ctx->model.proj_type) {
+        case PROJECTOR_TYPE_LDP:
+            return ctx->model.mm_model_block_1_block_2_1_b->ne[0];
+        case PROJECTOR_TYPE_LDPV2:
+            return ctx->model.mm_model_peg_0_b->ne[0];
+        case PROJECTOR_TYPE_MLP:
+        case PROJECTOR_TYPE_PIXTRAL:
+            return ctx->model.mm_2_w->ne[1];
+        case PROJECTOR_TYPE_MLP_NORM:
+            return ctx->model.mm_3_b->ne[0];
+        case PROJECTOR_TYPE_MINICPMV:
+            return ctx->model.mm_model_proj->ne[0];
+        case PROJECTOR_TYPE_GLM_EDGE:
+            return ctx->model.mm_model_mlp_3_w->ne[1];
+        case PROJECTOR_TYPE_QWEN2VL:
+        case PROJECTOR_TYPE_QWEN25VL:
+            return ctx->model.mm_1_b->ne[0];
+        case PROJECTOR_TYPE_GEMMA3:
+            return ctx->model.mm_input_proj_w->ne[0];
+        case PROJECTOR_TYPE_IDEFICS3:
+            return ctx->model.projection->ne[1];
+        case PROJECTOR_TYPE_ULTRAVOX:
+        case PROJECTOR_TYPE_VOXTRAL:
+            return ctx->model.mm_2_w->ne[1];
+        case PROJECTOR_TYPE_INTERNVL:
+            return ctx->model.mm_3_w->ne[1];
+        case PROJECTOR_TYPE_LLAMA4:
+            return ctx->model.mm_model_proj->ne[1];
+        case PROJECTOR_TYPE_QWEN2A:
+            return ctx->model.mm_fc_w->ne[1];
+        case PROJECTOR_TYPE_LFM2:
+            return ctx->model.mm_2_w->ne[1];
+        default:
+            LM_GGML_ABORT("Unknown projector type");
+    }
+}
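clip_n_mmproj_embd returns the per-token embedding width, read off the output projection tensor of whichever projector is loaded. Combined with the token-count check in the encode path above, a caller would size the output buffer roughly as in the hedged sketch below; clip_n_output_tokens is declared elsewhere in this package, and the helper itself is hypothetical.

// Hedged sizing sketch: allocate the embedding buffer an encode call fills.
// clip_n_output_tokens/clip_n_mmproj_embd are from this file's API surface;
// alloc_embd_buffer is a hypothetical wrapper.
#include <vector>

static std::vector<float> alloc_embd_buffer(struct clip_ctx * ctx, struct clip_image_f32 * img) {
    const int n_tokens = clip_n_output_tokens(ctx, img); // tokens the projector will emit
    const int n_embd   = clip_n_mmproj_embd(ctx);        // per-token width (see switch above)
    return std::vector<float>((size_t) n_tokens * n_embd);
}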
+
+int clip_is_minicpmv(const struct clip_ctx * ctx) {
+    if (ctx->proj_type() == PROJECTOR_TYPE_MINICPMV) {
+        return ctx->model.hparams.minicpmv_version;
+    }
+    return 0;
+}
+
+bool clip_is_glm(const struct clip_ctx * ctx) {
+    return ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE;
+}
+
+bool clip_is_qwen2vl(const struct clip_ctx * ctx) {
+    return ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL
+        || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL;
+}
+
+bool clip_is_llava(const struct clip_ctx * ctx) {
+    return ctx->model.hparams.has_llava_projector;
+}
+
+bool clip_is_gemma3(const struct clip_ctx * ctx) {
+    return ctx->proj_type() == PROJECTOR_TYPE_GEMMA3;
+}
+
+bool clip_has_vision_encoder(const struct clip_ctx * ctx) {
+    return ctx->model.modality == CLIP_MODALITY_VISION;
+}
+
+bool clip_has_audio_encoder(const struct clip_ctx * ctx) {
+    return ctx->model.modality == CLIP_MODALITY_AUDIO;
+}
+
+bool clip_has_whisper_encoder(const struct clip_ctx * ctx) {
+    return ctx->proj_type() == PROJECTOR_TYPE_ULTRAVOX
+        || ctx->proj_type() == PROJECTOR_TYPE_QWEN2A
+        || ctx->proj_type() == PROJECTOR_TYPE_VOXTRAL;
+}
+
+bool clip_encode_float_image(struct clip_ctx * ctx, int n_threads, float * img, int h, int w, float * vec) {
+    clip_image_f32 clip_img;
+    clip_img.buf.resize(h * w * 3);
+    for (int i = 0; i < h * w * 3; i++) {
+        clip_img.buf[i] = img[i];
+    }
+    clip_img.nx = w;
+    clip_img.ny = h;
+    // propagate the encoder's status instead of unconditionally returning true
+    return clip_image_encode(ctx, n_threads, &clip_img, vec);
+}
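clip_encode_float_image takes packed row-major h*w*3 floats (already preprocessed) and writes the embeddings into vec. A hedged usage sketch follows, reusing the sizing rule above; the 336x336 size and n_out_tokens are hypothetical, model-dependent values a real caller would obtain for the loaded model.

// Hedged usage sketch for clip_encode_float_image; input size and token count
// here are placeholders, not requirements of this API.
#include <vector>

static bool encode_gray_image(struct clip_ctx * ctx, int n_out_tokens, int n_threads) {
    const int h = 336, w = 336;                       // hypothetical input size
    std::vector<float> rgb((size_t) h * w * 3, 0.5f); // solid mid-gray, already normalized
    std::vector<float> out((size_t) n_out_tokens * clip_n_mmproj_embd(ctx));
    return clip_encode_float_image(ctx, n_threads, rgb.data(), h, w, out.data());
}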
+
+//
+// API used internally with mtmd
+//
+
+projector_type clip_get_projector_type(const struct clip_ctx * ctx) {
+    return ctx->proj_type();
+}
+
+void clip_image_f32_batch_add_mel(struct clip_image_f32_batch * batch, int n_mel, int n_frames, float * mel) {
+    clip_image_f32 * audio = new clip_image_f32;
+    audio->nx = n_frames;
+    audio->ny = n_mel;
+    audio->buf.resize(n_frames * n_mel);
+    std::memcpy(audio->buf.data(), mel, n_frames * n_mel * sizeof(float));
+
+    batch->entries.push_back(clip_image_f32_ptr(audio));
+    batch->is_audio = true;
+}
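clip_image_f32_batch_add_mel reuses the image container for audio, storing frames along nx and mel bins along ny, matching the inp_raw copy at the top of this section. A hedged usage sketch; the 128x3000 shape is a Whisper-style placeholder, not a requirement of this API.

// Hedged usage sketch: pack a (placeholder) mel spectrogram into a batch for
// the audio path. Real callers compute `mel` from PCM elsewhere.
#include <vector>

static void add_audio_example(struct clip_image_f32_batch * batch) {
    const int n_mel = 128, n_frames = 3000; // hypothetical Whisper-style shape
    std::vector<float> mel((size_t) n_mel * n_frames, 0.0f);
    clip_image_f32_batch_add_mel(batch, n_mel, n_frames, mel.data());
}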