@fugood/llama.node 0.4.7 → 0.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CMakeLists.txt +4 -0
- package/bin/darwin/arm64/llama-node.node +0 -0
- package/bin/darwin/x64/llama-node.node +0 -0
- package/bin/linux/arm64/llama-node.node +0 -0
- package/bin/linux/x64/llama-node.node +0 -0
- package/bin/linux-cuda/arm64/llama-node.node +0 -0
- package/bin/linux-cuda/x64/llama-node.node +0 -0
- package/bin/linux-vulkan/arm64/llama-node.node +0 -0
- package/bin/linux-vulkan/x64/llama-node.node +0 -0
- package/lib/binding.ts +66 -6
- package/lib/index.js +59 -17
- package/lib/index.ts +74 -23
- package/package.json +1 -1
- package/src/DecodeAudioTokenWorker.cpp +40 -0
- package/src/DecodeAudioTokenWorker.h +22 -0
- package/src/EmbeddingWorker.cpp +7 -5
- package/src/LlamaCompletionWorker.cpp +68 -54
- package/src/LlamaCompletionWorker.h +7 -8
- package/src/LlamaContext.cpp +551 -235
- package/src/LlamaContext.h +26 -4
- package/src/LoadSessionWorker.cpp +4 -2
- package/src/SaveSessionWorker.cpp +10 -6
- package/src/TokenizeWorker.cpp +23 -14
- package/src/TokenizeWorker.h +2 -2
- package/src/addons.cc +8 -11
- package/src/common.hpp +129 -126
- package/src/llama.cpp/.github/workflows/build.yml +2 -2
- package/src/llama.cpp/.github/workflows/release.yml +152 -129
- package/src/llama.cpp/.github/workflows/winget.yml +42 -0
- package/src/llama.cpp/common/arg.cpp +14 -13
- package/src/llama.cpp/common/common.cpp +4 -75
- package/src/llama.cpp/common/common.h +7 -12
- package/src/llama.cpp/examples/lookahead/lookahead.cpp +0 -13
- package/src/llama.cpp/examples/lookup/lookup.cpp +0 -11
- package/src/llama.cpp/examples/parallel/parallel.cpp +0 -9
- package/src/llama.cpp/examples/retrieval/retrieval.cpp +6 -6
- package/src/llama.cpp/examples/simple/simple.cpp +1 -1
- package/src/llama.cpp/examples/simple-chat/simple-chat.cpp +2 -2
- package/src/llama.cpp/examples/sycl/run-llama2.sh +4 -4
- package/src/llama.cpp/examples/sycl/run-llama3.sh +28 -0
- package/src/llama.cpp/examples/sycl/win-run-llama2.bat +1 -1
- package/src/llama.cpp/examples/sycl/win-run-llama3.bat +9 -0
- package/src/llama.cpp/ggml/include/ggml-opt.h +2 -0
- package/src/llama.cpp/ggml/include/ggml.h +11 -0
- package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +274 -0
- package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.h +27 -0
- package/src/llama.cpp/ggml/src/ggml-cann/ggml-cann.cpp +18 -2
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c +1 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/ops.cpp +107 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/vec.h +16 -0
- package/src/llama.cpp/ggml/src/ggml-musa/CMakeLists.txt +8 -2
- package/src/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +315 -155
- package/src/llama.cpp/ggml/src/ggml-opt.cpp +5 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +43 -12
- package/src/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +171 -112
- package/src/llama.cpp/ggml/src/ggml.c +64 -18
- package/src/llama.cpp/include/llama.h +24 -124
- package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf.txt +5 -1
- package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf_update.txt +5 -1
- package/src/llama.cpp/requirements/requirements-convert_lora_to_gguf.txt +2 -0
- package/src/llama.cpp/src/llama-batch.cpp +3 -1
- package/src/llama.cpp/src/llama-context.cpp +60 -110
- package/src/llama.cpp/src/llama-graph.cpp +137 -233
- package/src/llama.cpp/src/llama-graph.h +49 -7
- package/src/llama.cpp/src/llama-hparams.cpp +17 -1
- package/src/llama.cpp/src/llama-hparams.h +34 -5
- package/src/llama.cpp/src/llama-kv-cache.cpp +654 -321
- package/src/llama.cpp/src/llama-kv-cache.h +201 -85
- package/src/llama.cpp/src/llama-memory.h +3 -2
- package/src/llama.cpp/src/llama-model.cpp +273 -94
- package/src/llama.cpp/src/llama-model.h +4 -1
- package/src/llama.cpp/tests/test-arg-parser.cpp +1 -1
- package/src/llama.cpp/tools/llama-bench/llama-bench.cpp +1 -0
- package/src/llama.cpp/tools/mtmd/CMakeLists.txt +13 -2
- package/src/llama.cpp/tools/mtmd/clip-impl.h +108 -11
- package/src/llama.cpp/tools/mtmd/clip.cpp +466 -88
- package/src/llama.cpp/tools/mtmd/clip.h +6 -4
- package/src/llama.cpp/tools/mtmd/miniaudio.h +93468 -0
- package/src/llama.cpp/tools/mtmd/mtmd-audio.cpp +855 -0
- package/src/llama.cpp/tools/mtmd/mtmd-audio.h +62 -0
- package/src/llama.cpp/tools/mtmd/mtmd-cli.cpp +21 -14
- package/src/llama.cpp/tools/mtmd/mtmd-helper.cpp +36 -49
- package/src/llama.cpp/tools/mtmd/mtmd.cpp +362 -98
- package/src/llama.cpp/tools/mtmd/mtmd.h +52 -21
- package/src/llama.cpp/tools/run/run.cpp +2 -2
- package/src/llama.cpp/tools/server/server.cpp +158 -47
- package/src/llama.cpp/tools/server/utils.hpp +71 -43
- package/src/llama.cpp/tools/tts/tts.cpp +4 -2
- package/src/tts_utils.cpp +342 -0
- package/src/tts_utils.h +62 -0
- package/bin/win32/arm64/llama-node.node +0 -0
- package/bin/win32/arm64/node.lib +0 -0
- package/bin/win32/x64/llama-node.node +0 -0
- package/bin/win32/x64/node.lib +0 -0
- package/bin/win32-vulkan/arm64/llama-node.node +0 -0
- package/bin/win32-vulkan/arm64/node.lib +0 -0
- package/bin/win32-vulkan/x64/llama-node.node +0 -0
- package/bin/win32-vulkan/x64/node.lib +0 -0
package/src/llama.cpp/tools/mtmd/CMakeLists.txt

@@ -1,5 +1,15 @@
 # mtmd
 
+# compile mtmd-audio separately to avoid long compile times with miniaudio.h
+# TODO @ngxson : move miniaudio.h and stb_image.h to mtmd-helper.cpp, then compile the helper as a separate library
+add_library(mtmd_audio STATIC mtmd-audio.cpp mtmd-audio.h)
+if (BUILD_SHARED_LIBS)
+    set_target_properties(mtmd_audio PROPERTIES POSITION_INDEPENDENT_CODE ON)
+endif()
+target_link_libraries(mtmd_audio PRIVATE ggml ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(mtmd_audio PRIVATE cxx_std_17)
+target_include_directories(mtmd_audio PRIVATE .)
+
 add_library(mtmd OBJECT
     mtmd.cpp
     mtmd-helper.cpp
@@ -9,7 +19,7 @@ add_library(mtmd OBJECT
     clip-impl.h
     )
 
-target_link_libraries(mtmd PRIVATE ggml llama ${CMAKE_THREAD_LIBS_INIT})
+target_link_libraries(mtmd PRIVATE ggml llama mtmd_audio ${CMAKE_THREAD_LIBS_INIT})
 
 target_include_directories(mtmd PUBLIC .)
 target_include_directories(mtmd PRIVATE ../..)
@@ -22,12 +32,13 @@ if (BUILD_SHARED_LIBS)
     set_target_properties(mtmd PROPERTIES POSITION_INDEPENDENT_CODE ON)
     target_compile_definitions(mtmd PRIVATE LLAMA_SHARED LLAMA_BUILD)
     add_library(mtmd_shared SHARED $<TARGET_OBJECTS:mtmd>)
-    target_link_libraries(mtmd_shared PRIVATE ggml llama ${CMAKE_THREAD_LIBS_INIT})
+    target_link_libraries(mtmd_shared PRIVATE ggml llama mtmd_audio ${CMAKE_THREAD_LIBS_INIT})
     install(TARGETS mtmd_shared LIBRARY)
 endif()
 
 if (NOT MSVC)
     target_compile_options(mtmd PRIVATE -Wno-cast-qual) # stb_image.h
+    target_compile_options(mtmd_audio PRIVATE -Wno-cast-qual) # miniaudio.h
 endif()
 
 if(TARGET BUILD_INFO)
package/src/llama.cpp/tools/mtmd/clip-impl.h

@@ -4,6 +4,7 @@
 
 #include <climits>
 #include <cstdarg>
+#include <cinttypes>
 #include <string>
 #include <map>
 #include <sstream>
@@ -15,22 +16,26 @@
 #define KEY_FTYPE               "general.file_type"
 #define KEY_NAME                "general.name"
 #define KEY_DESCRIPTION         "general.description"
-#define …
+#define KEY_PROJ_TYPE           "clip.projector_type"
+#define KEY_HAS_AUDIO_ENC       "clip.has_audio_encoder"
+#define KEY_HAS_VISION_ENC      "clip.has_vision_encoder"
 #define KEY_USE_GELU            "clip.use_gelu"
 #define KEY_USE_SILU            "clip.use_silu"
-
-#define …
-#define …
-#define …
-#define …
-#define …
+
+#define KEY_N_EMBD              "clip.%s.embedding_length"
+#define KEY_N_FF                "clip.%s.feed_forward_length"
+#define KEY_N_BLOCK             "clip.%s.block_count"
+#define KEY_PROJ_DIM            "clip.%s.projection_dim"
+#define KEY_N_HEAD              "clip.%s.attention.head_count"
+#define KEY_LAYER_NORM_EPS      "clip.%s.attention.layer_norm_epsilon"
+
+// vision-specific
 #define KEY_IMAGE_SIZE          "clip.vision.image_size"
 #define KEY_PATCH_SIZE          "clip.vision.patch_size"
 #define KEY_IMAGE_MEAN          "clip.vision.image_mean"
 #define KEY_IMAGE_STD           "clip.vision.image_std"
 #define KEY_FEATURE_LAYER       "clip.vision.feature_layer"
 #define KEY_PROJ_SCALE_FACTOR   "clip.vision.projector.scale_factor"
-#define KEY_PROJ_TYPE           "clip.projector_type"
 #define KEY_SPATIAL_MERGE_SIZE  "clip.vision.spatial_merge_size"
 
 #define KEY_MM_PATCH_MERGE_TYPE "clip.vision.mm_patch_merge_type"
@@ -38,6 +43,11 @@
 #define KEY_IMAGE_CROP_RESOLUTION "clip.vision.image_crop_resolution"
 #define KEY_WIN_ATTN_PATTERN      "clip.vision.n_wa_pattern"
 #define KEY_ATTN_WINDOW_SIZE      "clip.vision.window_size"
+#define KEY_MINICPMV_VERSION      "clip.minicpmv_version"
+
+// audio-specific
+#define KEY_A_NUM_MEL_BINS        "clip.audio.num_mel_bins"
+#define KEY_A_PROJ_STACK_FACTOR   "clip.audio.projector.stack_factor"
 
 
 //
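The new "clip.%s.*" key patterns above are shared between the vision and audio encoders: the modality name is substituted into the format string before the GGUF key is read, while modality-specific settings keep dedicated "clip.vision.*" / "clip.audio.*" keys. A minimal sketch of that expansion, assuming a snprintf-style substitution; the helper name format_key is hypothetical and not part of clip.cpp:

```cpp
#include <cstdio>
#include <string>

// Same pattern as the KEY_N_EMBD define in clip-impl.h.
#define KEY_N_EMBD "clip.%s.embedding_length"

// Hypothetical helper: expand a "clip.%s.*" pattern for a given modality.
static std::string format_key(const char * fmt, const char * modality) {
    char buf[256];
    snprintf(buf, sizeof(buf), fmt, modality);
    return std::string(buf);
}

int main() {
    printf("%s\n", format_key(KEY_N_EMBD, "vision").c_str()); // clip.vision.embedding_length
    printf("%s\n", format_key(KEY_N_EMBD, "audio").c_str());  // clip.audio.embedding_length
    return 0;
}
```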
@@ -94,6 +104,12 @@
 #define TN_GLM_ADAPTER_GATE     "adapter.linear.gate.%s"
 #define TN_GLM_ADAPTER_D_4H_2_H "adapter.linear.dense_4h_to_h.%s"
 
+// ultravox
+#define TN_CONV1D       "a.conv1d.%d.%s"
+#define TN_MM_AUDIO_MLP "mm.a.mlp.%d.%s"
+#define TN_MM_NORM_PRE  "mm.a.norm_pre.%s"
+#define TN_MM_NORM_MID  "mm.a.norm_mid.%s"
+
 // align x to upper multiple of n
 #define CLIP_ALIGN(x, n) ((((x) + (n) - 1) / (n)) * (n))
 
@@ -109,7 +125,9 @@ enum projector_type {
     PROJECTOR_TYPE_IDEFICS3,
     PROJECTOR_TYPE_PIXTRAL,
     PROJECTOR_TYPE_QWEN25VL,
+    PROJECTOR_TYPE_ULTRAVOX,
     PROJECTOR_TYPE_INTERNVL,
+    PROJECTOR_TYPE_LLAMA4,
     PROJECTOR_TYPE_UNKNOWN,
 };
 
@@ -124,7 +142,9 @@ static std::map<projector_type, std::string> PROJECTOR_TYPE_NAMES = {
     { PROJECTOR_TYPE_GEMMA3,    "gemma3"},
     { PROJECTOR_TYPE_IDEFICS3,  "idefics3"},
     { PROJECTOR_TYPE_PIXTRAL,   "pixtral"},
+    { PROJECTOR_TYPE_ULTRAVOX,  "ultravox"},
     { PROJECTOR_TYPE_INTERNVL,  "internvl"},
+    { PROJECTOR_TYPE_LLAMA4,    "llama4"},
 };
 
 static projector_type clip_projector_type_from_string(const std::string & str) {
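With the ultravox and llama4 entries added to PROJECTOR_TYPE_NAMES, the clip_projector_type_from_string context line above is the function that maps the GGUF clip.projector_type string back to an enum value. The sketch below shows one plausible shape for such a reverse lookup over the name table; it is an illustration under that assumption, not the exact body from clip-impl.h:

```cpp
#include <map>
#include <string>

// Reduced stand-ins for the enum and table from clip-impl.h.
enum projector_type { PROJECTOR_TYPE_ULTRAVOX, PROJECTOR_TYPE_LLAMA4, PROJECTOR_TYPE_UNKNOWN };

static std::map<projector_type, std::string> PROJECTOR_TYPE_NAMES = {
    { PROJECTOR_TYPE_ULTRAVOX, "ultravox" },
    { PROJECTOR_TYPE_LLAMA4,   "llama4"   },
};

// Illustrative reverse lookup: scan the table and compare the stored name.
static projector_type projector_type_from_string(const std::string & str) {
    for (const auto & kv : PROJECTOR_TYPE_NAMES) {
        if (kv.second == str) {
            return kv.first;
        }
    }
    return PROJECTOR_TYPE_UNKNOWN;
}
```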
@@ -144,8 +164,10 @@ struct clip_image_u8 {
     std::vector<uint8_t> buf;
 };
 
-// …
-// …
+// For images, buf.size() == nx*ny*3
+// Memory layout: RGBRGBRGB...
+// For audio, only one channel is used, buf.size() == nx*ny
+// nx will be n_frames and ny will be n_mel
 struct clip_image_f32 {
     int nx;
     int ny;
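The comments above pin down the clip_image_f32 buffer layout: an image stores interleaved RGB floats (buf.size() == nx*ny*3), while an audio "image" stores a single-channel mel spectrogram with nx frames and ny mel bins (buf.size() == nx*ny). A small indexing sketch under those two layouts; the struct here is a stand-in, and the row-major ordering for the audio case is an assumption carried over from the image case:

```cpp
#include <cstddef>
#include <vector>

// Stand-in mirroring the layout described in clip-impl.h.
struct image_f32 {
    int nx;                 // width, or n_frames for audio
    int ny;                 // height, or n_mel for audio
    std::vector<float> buf; // RGB interleaved for images, single channel for audio
};

// Image pixel channel: ny rows of nx pixels, 3 floats per pixel (RGBRGBRGB...).
static float image_channel(const image_f32 & img, int x, int y, int c) {
    return img.buf[(static_cast<size_t>(y) * img.nx + x) * 3 + c];
}

// Audio cell: one float per (frame, mel bin), assuming the same row-major convention.
static float mel_value(const image_f32 & mel, int frame, int bin) {
    return mel.buf[static_cast<size_t>(bin) * mel.nx + frame];
}
```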
@@ -239,9 +261,20 @@ struct clip_image_u8_batch {
 
 struct clip_image_f32_batch {
     std::vector<clip_image_f32_ptr> entries;
+    bool is_audio = false;
+
+    // for llava-uhd style models, we need to know the grid size
+    // note: entries.size() == grid_x * grid_y + 1 (one overview image)
+    int grid_x = 0;
+    int grid_y = 0;
 
     clip_image_f32_batch clone() const {
-        clip_image_f32_batch new_batch …
+        clip_image_f32_batch new_batch{
+            /* entries  */ {},
+            /* is_audio */ is_audio,
+            /* grid_x   */ grid_x,
+            /* grid_y   */ grid_y,
+        };
         new_batch.entries.reserve(entries.size());
         for (const auto & entry : entries) {
             new_batch.entries.emplace_back(new clip_image_f32(*entry));
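The new grid_x/grid_y fields record the llava-uhd slicing described in the comment: a sliced batch holds grid_x * grid_y tiles plus one overview image, and clone() now copies these fields along with is_audio. A hedged sketch of checking that invariant on the consumer side; the view struct below is a stand-in for the fields only, not the real batch type:

```cpp
#include <cassert>
#include <cstddef>

// Stand-in for the fields added to clip_image_f32_batch in this diff.
struct f32_batch_view {
    size_t n_entries = 0; // entries.size() in the real struct
    bool   is_audio  = false;
    int    grid_x    = 0;
    int    grid_y    = 0;
};

// Documented invariant for llava-uhd style image batches:
// entries.size() == grid_x * grid_y + 1 (the extra entry is the overview image).
static size_t n_tiles(const f32_batch_view & b) {
    const size_t tiles = static_cast<size_t>(b.grid_x) * static_cast<size_t>(b.grid_y);
    assert(b.is_audio || b.grid_x == 0 || b.n_entries == tiles + 1);
    return tiles;
}
```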
@@ -358,6 +391,70 @@ static std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) {
     }
 }
 
+//
+// debugging
+//
+
+static void print_tensor_shape(ggml_tensor * t) {
+    printf("%s.shape = [", t->name);
+    for (int i = 0; i < ggml_n_dims(t); ++i) {
+        printf("%" PRId64, t->ne[i]);
+        if (i < ggml_n_dims(t) - 1) {
+            printf(", ");
+        }
+    }
+    printf("]\n");
+}
+
+static void print_tensor_data(ggml_tensor * t, uint8_t * data, int64_t n) {
+    ggml_type type = t->type;
+    int64_t * ne = t->ne;
+    size_t * nb = t->nb;
+    for (int64_t i3 = 0; i3 < ne[3]; i3++) {
+        printf("%s.data: [\n", t->name);
+        for (int64_t i2 = 0; i2 < ne[2]; i2++) {
+            if (i2 == n && ne[2] > 2*n) {
+                printf(" ..., \n");
+                i2 = ne[2] - n;
+            }
+            printf(" [\n");
+            for (int64_t i1 = 0; i1 < ne[1]; i1++) {
+                if (i1 == n && ne[1] > 2*n) {
+                    printf(" ..., \n");
+                    i1 = ne[1] - n;
+                }
+                printf(" [");
+                for (int64_t i0 = 0; i0 < ne[0]; i0++) {
+                    if (i0 == n && ne[0] > 2*n) {
+                        printf("..., ");
+                        i0 = ne[0] - n;
+                    }
+                    size_t i = i3 * nb[3] + i2 * nb[2] + i1 * nb[1] + i0 * nb[0];
+                    float v;
+                    if (type == GGML_TYPE_F16) {
+                        v = ggml_fp16_to_fp32(*(ggml_fp16_t *) &data[i]);
+                    } else if (type == GGML_TYPE_F32) {
+                        v = *(float *) &data[i];
+                    } else if (type == GGML_TYPE_I32) {
+                        v = (float) *(int32_t *) &data[i];
+                    } else if (type == GGML_TYPE_I16) {
+                        v = (float) *(int16_t *) &data[i];
+                    } else if (type == GGML_TYPE_I8) {
+                        v = (float) *(int8_t *) &data[i];
+                    } else {
+                        GGML_ABORT("fatal error");
+                    }
+                    printf("%8.4f", v);
+                    if (i0 < ne[0] - 1) printf(", ");
+                }
+                printf("],\n");
+            }
+            printf(" ],\n");
+        }
+        printf(" ]\n");
+    }
+}
+
 //
 // API used internally with mtmd
 //