@fugood/llama.node 0.3.15 → 0.3.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (203)
  1. package/CMakeLists.txt +3 -0
  2. package/bin/darwin/arm64/llama-node.node +0 -0
  3. package/bin/darwin/x64/llama-node.node +0 -0
  4. package/bin/linux/arm64/llama-node.node +0 -0
  5. package/bin/linux/x64/llama-node.node +0 -0
  6. package/bin/linux-cuda/arm64/llama-node.node +0 -0
  7. package/bin/linux-cuda/x64/llama-node.node +0 -0
  8. package/bin/linux-vulkan/arm64/llama-node.node +0 -0
  9. package/bin/linux-vulkan/x64/llama-node.node +0 -0
  10. package/bin/win32/arm64/llama-node.node +0 -0
  11. package/bin/win32/arm64/node.lib +0 -0
  12. package/bin/win32/x64/llama-node.node +0 -0
  13. package/bin/win32/x64/node.lib +0 -0
  14. package/bin/win32-vulkan/arm64/llama-node.node +0 -0
  15. package/bin/win32-vulkan/arm64/node.lib +0 -0
  16. package/bin/win32-vulkan/x64/llama-node.node +0 -0
  17. package/bin/win32-vulkan/x64/node.lib +0 -0
  18. package/lib/binding.ts +5 -0
  19. package/package.json +1 -1
  20. package/src/LlamaCompletionWorker.cpp +8 -0
  21. package/src/LlamaCompletionWorker.h +1 -0
  22. package/src/LlamaContext.cpp +3 -2
  23. package/src/llama.cpp/.github/workflows/build-linux-cross.yml +124 -0
  24. package/src/llama.cpp/.github/workflows/build.yml +70 -27
  25. package/src/llama.cpp/.github/workflows/docker.yml +6 -6
  26. package/src/llama.cpp/.github/workflows/server.yml +7 -11
  27. package/src/llama.cpp/CMakeLists.txt +23 -1
  28. package/src/llama.cpp/common/CMakeLists.txt +6 -3
  29. package/src/llama.cpp/common/arg.cpp +809 -105
  30. package/src/llama.cpp/common/arg.h +9 -0
  31. package/src/llama.cpp/common/chat.cpp +1 -1
  32. package/src/llama.cpp/common/common.cpp +31 -521
  33. package/src/llama.cpp/common/common.h +17 -36
  34. package/src/llama.cpp/common/json-schema-to-grammar.cpp +3 -0
  35. package/src/llama.cpp/common/llguidance.cpp +30 -47
  36. package/src/llama.cpp/common/minja/chat-template.hpp +15 -7
  37. package/src/llama.cpp/common/minja/minja.hpp +119 -93
  38. package/src/llama.cpp/common/sampling.cpp +3 -0
  39. package/src/llama.cpp/docs/build.md +122 -7
  40. package/src/llama.cpp/examples/CMakeLists.txt +0 -9
  41. package/src/llama.cpp/examples/batched/batched.cpp +1 -1
  42. package/src/llama.cpp/examples/batched-bench/batched-bench.cpp +1 -1
  43. package/src/llama.cpp/examples/embedding/embedding.cpp +7 -1
  44. package/src/llama.cpp/examples/export-lora/export-lora.cpp +1 -1
  45. package/src/llama.cpp/examples/gguf-split/gguf-split.cpp +15 -16
  46. package/src/llama.cpp/examples/gritlm/gritlm.cpp +1 -1
  47. package/src/llama.cpp/examples/llama-bench/llama-bench.cpp +210 -8
  48. package/src/llama.cpp/examples/llama.android/llama/build.gradle.kts +1 -0
  49. package/src/llama.cpp/examples/llava/CMakeLists.txt +39 -24
  50. package/src/llama.cpp/examples/llava/clip-impl.h +345 -0
  51. package/src/llama.cpp/examples/llava/clip.cpp +2152 -1803
  52. package/src/llama.cpp/examples/llava/clip.h +39 -22
  53. package/src/llama.cpp/examples/llava/deprecation-warning.cpp +22 -0
  54. package/src/llama.cpp/examples/llava/llava.cpp +64 -52
  55. package/src/llama.cpp/examples/llava/mtmd-cli.cpp +344 -0
  56. package/src/llama.cpp/examples/llava/mtmd.cpp +708 -0
  57. package/src/llama.cpp/examples/llava/mtmd.h +168 -0
  58. package/src/llama.cpp/examples/llava/{qwen2vl-cli.cpp → qwen2vl-test.cpp} +83 -31
  59. package/src/llama.cpp/examples/main/main.cpp +16 -5
  60. package/src/llama.cpp/examples/parallel/parallel.cpp +3 -1
  61. package/src/llama.cpp/examples/passkey/passkey.cpp +1 -1
  62. package/src/llama.cpp/examples/perplexity/perplexity.cpp +17 -3
  63. package/src/llama.cpp/examples/quantize/quantize.cpp +115 -2
  64. package/src/llama.cpp/examples/rpc/CMakeLists.txt +4 -2
  65. package/src/llama.cpp/examples/rpc/rpc-server.cpp +163 -8
  66. package/src/llama.cpp/examples/run/CMakeLists.txt +12 -1
  67. package/src/llama.cpp/examples/run/run.cpp +14 -28
  68. package/src/llama.cpp/examples/server/httplib.h +313 -247
  69. package/src/llama.cpp/examples/server/server.cpp +243 -139
  70. package/src/llama.cpp/examples/server/utils.hpp +51 -2
  71. package/src/llama.cpp/examples/speculative/speculative.cpp +1 -1
  72. package/src/llama.cpp/examples/speculative-simple/speculative-simple.cpp +1 -1
  73. package/src/llama.cpp/examples/sycl/build.sh +2 -2
  74. package/src/llama.cpp/examples/sycl/win-build-sycl.bat +2 -2
  75. package/src/llama.cpp/examples/tts/tts.cpp +14 -9
  76. package/src/llama.cpp/ggml/CMakeLists.txt +8 -2
  77. package/src/llama.cpp/ggml/cmake/GitVars.cmake +22 -0
  78. package/src/llama.cpp/ggml/include/ggml-cpu.h +5 -0
  79. package/src/llama.cpp/ggml/include/ggml-rpc.h +6 -1
  80. package/src/llama.cpp/ggml/include/ggml.h +66 -99
  81. package/src/llama.cpp/ggml/src/CMakeLists.txt +15 -8
  82. package/src/llama.cpp/ggml/src/ggml-cann/CMakeLists.txt +0 -2
  83. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.cpp +8 -4
  84. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.h +5 -5
  85. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +692 -1534
  86. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.h +613 -122
  87. package/src/llama.cpp/ggml/src/ggml-cann/common.h +135 -1
  88. package/src/llama.cpp/ggml/src/ggml-cann/ggml-cann.cpp +507 -137
  89. package/src/llama.cpp/ggml/src/ggml-common.h +12 -6
  90. package/src/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt +48 -22
  91. package/src/llama.cpp/ggml/src/ggml-cpu/binary-ops.cpp +158 -0
  92. package/src/llama.cpp/ggml/src/ggml-cpu/binary-ops.h +16 -0
  93. package/src/llama.cpp/ggml/src/ggml-cpu/common.h +72 -0
  94. package/src/llama.cpp/ggml/src/ggml-cpu/cpu-feats-x86.cpp +1 -1
  95. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +2413 -228
  96. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-impl.h +2 -21
  97. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-quants.c +754 -404
  98. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c +1004 -13516
  99. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.cpp +2 -0
  100. package/src/llama.cpp/ggml/src/ggml-cpu/kleidiai/kernels.cpp +2 -7
  101. package/src/llama.cpp/ggml/src/ggml-cpu/kleidiai/kernels.h +0 -1
  102. package/src/llama.cpp/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +3 -4
  103. package/src/llama.cpp/ggml/src/ggml-cpu/llamafile/sgemm.cpp +533 -88
  104. package/src/llama.cpp/ggml/src/ggml-cpu/ops.cpp +8809 -0
  105. package/src/llama.cpp/ggml/src/ggml-cpu/ops.h +110 -0
  106. package/src/llama.cpp/ggml/src/ggml-cpu/simd-mappings.h +892 -0
  107. package/src/llama.cpp/ggml/src/ggml-cpu/unary-ops.cpp +186 -0
  108. package/src/llama.cpp/ggml/src/ggml-cpu/unary-ops.h +28 -0
  109. package/src/llama.cpp/ggml/src/ggml-cpu/vec.cpp +258 -0
  110. package/src/llama.cpp/ggml/src/ggml-cpu/vec.h +802 -0
  111. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +7 -0
  112. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/musa.h +1 -0
  113. package/src/llama.cpp/ggml/src/ggml-hip/CMakeLists.txt +0 -4
  114. package/src/llama.cpp/ggml/src/ggml-impl.h +52 -18
  115. package/src/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +70 -3
  116. package/src/llama.cpp/ggml/src/ggml-opencl/CMakeLists.txt +67 -119
  117. package/src/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +1023 -260
  118. package/src/llama.cpp/ggml/src/ggml-rpc/ggml-rpc.cpp +293 -40
  119. package/src/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +127 -33
  120. package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +1 -0
  121. package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.cpp +350 -0
  122. package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.hpp +39 -0
  123. package/src/llama.cpp/ggml/src/ggml-sycl/common.cpp +0 -35
  124. package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +29 -293
  125. package/src/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +79 -90
  126. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +967 -438
  127. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp +22 -23
  128. package/src/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +12 -43
  129. package/src/llama.cpp/ggml/src/ggml-sycl/getrows.cpp +24 -20
  130. package/src/llama.cpp/ggml/src/ggml-sycl/getrows.hpp +1 -4
  131. package/src/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +210 -286
  132. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.cpp +84 -74
  133. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.hpp +1 -3
  134. package/src/llama.cpp/ggml/src/ggml-sycl/norm.cpp +37 -49
  135. package/src/llama.cpp/ggml/src/ggml-sycl/norm.hpp +7 -22
  136. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.cpp +4 -14
  137. package/src/llama.cpp/ggml/src/ggml-sycl/rope.cpp +204 -118
  138. package/src/llama.cpp/ggml/src/ggml-sycl/rope.hpp +1 -3
  139. package/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +23 -0
  140. package/src/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +692 -126
  141. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +12 -0
  142. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +21 -10
  143. package/src/llama.cpp/ggml/src/ggml.c +141 -245
  144. package/src/llama.cpp/ggml/src/gguf.cpp +1 -0
  145. package/src/llama.cpp/include/llama.h +30 -11
  146. package/src/llama.cpp/models/ggml-vocab-llama4.gguf.inp +112 -0
  147. package/src/llama.cpp/models/ggml-vocab-llama4.gguf.out +46 -0
  148. package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.inp +112 -0
  149. package/src/llama.cpp/models/ggml-vocab-pixtral.gguf.out +46 -0
  150. package/src/llama.cpp/requirements/requirements-all.txt +2 -0
  151. package/src/llama.cpp/requirements/requirements-gguf_editor_gui.txt +3 -0
  152. package/src/llama.cpp/src/CMakeLists.txt +3 -2
  153. package/src/llama.cpp/src/llama-adapter.cpp +37 -1
  154. package/src/llama.cpp/src/llama-arch.cpp +161 -17
  155. package/src/llama.cpp/src/llama-arch.h +16 -0
  156. package/src/llama.cpp/src/llama-chat.cpp +82 -17
  157. package/src/llama.cpp/src/llama-chat.h +6 -2
  158. package/src/llama.cpp/src/llama-context.cpp +108 -92
  159. package/src/llama.cpp/src/llama-context.h +1 -2
  160. package/src/llama.cpp/src/llama-graph.cpp +189 -119
  161. package/src/llama.cpp/src/llama-graph.h +26 -6
  162. package/src/llama.cpp/src/llama-hparams.h +13 -0
  163. package/src/llama.cpp/src/llama-kv-cache.cpp +70 -123
  164. package/src/llama.cpp/src/llama-kv-cache.h +41 -115
  165. package/src/llama.cpp/src/llama-memory.h +1 -1
  166. package/src/llama.cpp/src/llama-mmap.cpp +1 -1
  167. package/src/llama.cpp/src/llama-model-loader.cpp +10 -5
  168. package/src/llama.cpp/src/llama-model-loader.h +5 -3
  169. package/src/llama.cpp/src/llama-model.cpp +1544 -291
  170. package/src/llama.cpp/src/llama-model.h +13 -1
  171. package/src/llama.cpp/src/llama-quant.cpp +29 -8
  172. package/src/llama.cpp/src/llama-sampling.cpp +7 -1
  173. package/src/llama.cpp/src/llama-vocab.cpp +44 -6
  174. package/src/llama.cpp/src/llama.cpp +1 -1
  175. package/src/llama.cpp/tests/CMakeLists.txt +43 -30
  176. package/src/llama.cpp/tests/test-arg-parser.cpp +51 -4
  177. package/src/llama.cpp/tests/test-backend-ops.cpp +139 -57
  178. package/src/llama.cpp/tests/test-chat-template.cpp +34 -13
  179. package/src/llama.cpp/tests/test-chat.cpp +12 -2
  180. package/src/llama.cpp/{examples/gbnf-validator/gbnf-validator.cpp → tests/test-gbnf-validator.cpp} +2 -2
  181. package/src/llama.cpp/tests/test-grammar-integration.cpp +3 -2
  182. package/src/llama.cpp/tests/test-grammar-llguidance.cpp +63 -2
  183. package/src/llama.cpp/tests/test-grammar-parser.cpp +3 -1
  184. package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +17 -1
  185. package/src/llama.cpp/tests/test-llama-grammar.cpp +2 -1
  186. package/src/llama.cpp/{examples/quantize-stats/quantize-stats.cpp → tests/test-quantize-stats.cpp} +3 -1
  187. package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp +2 -1
  188. package/src/llama.cpp/tests/test-tokenizer-1-spm.cpp +2 -1
  189. package/src/llama.cpp/examples/gbnf-validator/CMakeLists.txt +0 -5
  190. package/src/llama.cpp/examples/llava/gemma3-cli.cpp +0 -341
  191. package/src/llama.cpp/examples/llava/llava-cli.cpp +0 -332
  192. package/src/llama.cpp/examples/llava/minicpmv-cli.cpp +0 -354
  193. package/src/llama.cpp/examples/quantize-stats/CMakeLists.txt +0 -6
  194. package/src/llama.cpp/ggml/src/ggml-cann/kernels/CMakeLists.txt +0 -30
  195. package/src/llama.cpp/ggml/src/ggml-cann/kernels/ascendc_kernels.h +0 -19
  196. package/src/llama.cpp/ggml/src/ggml-cann/kernels/dup.cpp +0 -234
  197. package/src/llama.cpp/ggml/src/ggml-cann/kernels/get_row_f16.cpp +0 -197
  198. package/src/llama.cpp/ggml/src/ggml-cann/kernels/get_row_f32.cpp +0 -190
  199. package/src/llama.cpp/ggml/src/ggml-cann/kernels/get_row_q4_0.cpp +0 -204
  200. package/src/llama.cpp/ggml/src/ggml-cann/kernels/get_row_q8_0.cpp +0 -191
  201. package/src/llama.cpp/ggml/src/ggml-cann/kernels/quantize_f16_q8_0.cpp +0 -218
  202. package/src/llama.cpp/ggml/src/ggml-cann/kernels/quantize_f32_q8_0.cpp +0 -216
  203. package/src/llama.cpp/ggml/src/ggml-cann/kernels/quantize_float_to_q4_0.cpp +0 -295
package/src/llama.cpp/examples/llava/llava-cli.cpp (deleted)
@@ -1,332 +0,0 @@
- #include "arg.h"
- #include "base64.hpp"
- #include "log.h"
- #include "common.h"
- #include "sampling.h"
- #include "clip.h"
- #include "llava.h"
- #include "llama.h"
- #include "ggml.h"
-
- #include <cstdio>
- #include <cstdlib>
- #include <cstring>
- #include <vector>
-
- static bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_token> tokens, int n_batch, int * n_past) {
- int N = (int) tokens.size();
- for (int i = 0; i < N; i += n_batch) {
- int n_eval = (int) tokens.size() - i;
- if (n_eval > n_batch) {
- n_eval = n_batch;
- }
- if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval))) {
- LOG_ERR("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past);
- return false;
- }
- *n_past += n_eval;
- }
- return true;
- }
-
- static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) {
- std::vector<llama_token> tokens;
- tokens.push_back(id);
- return eval_tokens(ctx_llama, tokens, 1, n_past);
- }
-
- static bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos){
- std::string str2 = str;
- std::vector<llama_token> embd_inp = common_tokenize(ctx_llama, str2, add_bos, true);
- eval_tokens(ctx_llama, embd_inp, n_batch, n_past);
- return true;
- }
-
- static const char * sample(struct common_sampler * smpl,
- struct llama_context * ctx_llama,
- int * n_past) {
- const llama_token id = common_sampler_sample(smpl, ctx_llama, -1);
- common_sampler_accept(smpl, id, true);
-
- const llama_model * model = llama_get_model(ctx_llama);
- const llama_vocab * vocab = llama_model_get_vocab(model);
-
- static std::string ret;
- if (llama_vocab_is_eog(vocab, id)) {
- ret = "</s>";
- } else {
- ret = common_token_to_piece(ctx_llama, id);
- }
- eval_id(ctx_llama, id, n_past);
- return ret.c_str();
- }
-
- static const char* IMG_BASE64_TAG_BEGIN = "<img src=\"data:image/jpeg;base64,";
- static const char* IMG_BASE64_TAG_END = "\">";
-
- static void find_image_tag_in_prompt(const std::string& prompt, size_t& begin_out, size_t& end_out) {
- begin_out = prompt.find(IMG_BASE64_TAG_BEGIN);
- end_out = prompt.find(IMG_BASE64_TAG_END, (begin_out == std::string::npos) ? 0UL : begin_out);
- }
-
- static bool prompt_contains_image(const std::string& prompt) {
- size_t begin, end;
- find_image_tag_in_prompt(prompt, begin, end);
- return (begin != std::string::npos);
- }
-
- // replaces the base64 image tag in the prompt with `replacement`
- static llava_image_embed * llava_image_embed_make_with_prompt_base64(struct clip_ctx * ctx_clip, int n_threads, const std::string& prompt) {
- size_t img_base64_str_start, img_base64_str_end;
- find_image_tag_in_prompt(prompt, img_base64_str_start, img_base64_str_end);
- if (img_base64_str_start == std::string::npos || img_base64_str_end == std::string::npos) {
- LOG_ERR("%s: invalid base64 image tag. must be %s<base64 byte string>%s\n", __func__, IMG_BASE64_TAG_BEGIN, IMG_BASE64_TAG_END);
- return NULL;
- }
-
- auto base64_bytes_start = img_base64_str_start + strlen(IMG_BASE64_TAG_BEGIN);
- auto base64_bytes_count = img_base64_str_end - base64_bytes_start;
- auto base64_str = prompt.substr(base64_bytes_start, base64_bytes_count );
-
- auto required_bytes = base64::required_encode_size(base64_str.size());
- auto img_bytes = std::vector<unsigned char>(required_bytes);
- base64::decode(base64_str.begin(), base64_str.end(), img_bytes.begin());
-
- auto embed = llava_image_embed_make_with_bytes(ctx_clip, n_threads, img_bytes.data(), img_bytes.size());
- if (!embed) {
- LOG_ERR("%s: could not load image from base64 string.\n", __func__);
- return NULL;
- }
-
- return embed;
- }
-
- static std::string remove_image_from_prompt(const std::string& prompt, const char * replacement = "") {
- size_t begin, end;
- find_image_tag_in_prompt(prompt, begin, end);
- if (begin == std::string::npos || end == std::string::npos) {
- return prompt;
- }
- auto pre = prompt.substr(0, begin);
- auto post = prompt.substr(end + strlen(IMG_BASE64_TAG_END));
- return pre + replacement + post;
- }
-
- struct llava_context {
- struct clip_ctx * ctx_clip = NULL;
- struct llama_context * ctx_llama = NULL;
- struct llama_model * model = NULL;
- };
-
- static void print_usage(int, char ** argv) {
- LOG("\n example usage:\n");
- LOG("\n %s -m <llava-v1.5-7b/ggml-model-q5_k.gguf> --mmproj <llava-v1.5-7b/mmproj-model-f16.gguf> --image <path/to/an/image.jpg> --image <path/to/another/image.jpg> [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]);
- LOG("\n note: a lower temperature value like 0.1 is recommended for better quality.\n");
- }
-
- static struct llava_image_embed * load_image(llava_context * ctx_llava, common_params * params, const std::string & fname) {
-
- // load and preprocess the image
- llava_image_embed * embed = NULL;
- auto prompt = params->prompt;
- if (prompt_contains_image(prompt)) {
- if (!params->image.empty()) {
- LOG_INF("using base64 encoded image instead of command line image path\n");
- }
- embed = llava_image_embed_make_with_prompt_base64(ctx_llava->ctx_clip, params->cpuparams.n_threads, prompt);
- if (!embed) {
- LOG_ERR("%s: can't load image from prompt\n", __func__);
- return NULL;
- }
- params->prompt = remove_image_from_prompt(prompt);
- } else {
- embed = llava_image_embed_make_with_filename(ctx_llava->ctx_clip, params->cpuparams.n_threads, fname.c_str());
- if (!embed) {
- fprintf(stderr, "%s: is %s really an image file?\n", __func__, fname.c_str());
- return NULL;
- }
- }
-
- return embed;
- }
-
- static void process_prompt(struct llava_context * ctx_llava, struct llava_image_embed * image_embed, common_params * params, const std::string & prompt) {
- int n_past = 0;
-
- const int max_tgt_len = params->n_predict < 0 ? 256 : params->n_predict;
-
- std::string system_prompt, user_prompt;
- size_t image_pos = prompt.find("<image>");
- if (image_pos != std::string::npos) {
- // new templating mode: Provide the full prompt including system message and use <image> as a placeholder for the image
- system_prompt = prompt.substr(0, image_pos);
- user_prompt = prompt.substr(image_pos + std::string("<image>").length());
- LOG_INF("system_prompt: %s\n", system_prompt.c_str());
- if (params->verbose_prompt) {
- auto tmp = common_tokenize(ctx_llava->ctx_llama, system_prompt, true, true);
- for (int i = 0; i < (int) tmp.size(); i++) {
- LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
- }
- }
- LOG_INF("user_prompt: %s\n", user_prompt.c_str());
- if (params->verbose_prompt) {
- auto tmp = common_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
- for (int i = 0; i < (int) tmp.size(); i++) {
- LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
- }
- }
- } else {
- // llava-1.5 native mode
- system_prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER:";
- user_prompt = prompt + "\nASSISTANT:";
- if (params->verbose_prompt) {
- auto tmp = common_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
- for (int i = 0; i < (int) tmp.size(); i++) {
- LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
- }
- }
- }
-
- eval_string(ctx_llava->ctx_llama, system_prompt.c_str(), params->n_batch, &n_past, true);
- llava_eval_image_embed(ctx_llava->ctx_llama, image_embed, params->n_batch, &n_past);
- eval_string(ctx_llava->ctx_llama, user_prompt.c_str(), params->n_batch, &n_past, false);
-
- // generate the response
-
- LOG("\n");
-
- struct common_sampler * smpl = common_sampler_init(ctx_llava->model, params->sampling);
- if (!smpl) {
- LOG_ERR("%s: failed to initialize sampling subsystem\n", __func__);
- exit(1);
- }
-
- std::string response = "";
- for (int i = 0; i < max_tgt_len; i++) {
- const char * tmp = sample(smpl, ctx_llava->ctx_llama, &n_past);
- response += tmp;
- if (strcmp(tmp, "</s>") == 0) break;
- if (strstr(tmp, "###")) break; // Yi-VL behavior
- LOG("%s", tmp);
- if (strstr(response.c_str(), "<|im_end|>")) break; // Yi-34B llava-1.6 - for some reason those decode not as the correct token (tokenizer works)
- if (strstr(response.c_str(), "<|im_start|>")) break; // Yi-34B llava-1.6
- if (strstr(response.c_str(), "USER:")) break; // mistral llava-1.6
-
- fflush(stdout);
- }
-
- common_sampler_free(smpl);
- LOG("\n");
- }
-
- static struct llama_model * llava_init(common_params * params) {
- llama_backend_init();
- llama_numa_init(params->numa);
-
- llama_model_params model_params = common_model_params_to_llama(*params);
-
- llama_model * model = llama_model_load_from_file(params->model.c_str(), model_params);
- if (model == NULL) {
- LOG_ERR("%s: unable to load model\n" , __func__);
- return NULL;
- }
- return model;
- }
-
- static struct llava_context * llava_init_context(common_params * params, llama_model * model) {
- const char * clip_path = params->mmproj.c_str();
-
- auto prompt = params->prompt;
- if (prompt.empty()) {
- prompt = "describe the image in detail.";
- }
-
- auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1);
-
- llama_context_params ctx_params = common_context_params_to_llama(*params);
- ctx_params.n_ctx = params->n_ctx < 2048 ? 2048 : params->n_ctx; // we need a longer context size to process image embeddings
-
- llama_context * ctx_llama = llama_init_from_model(model, ctx_params);
-
- if (ctx_llama == NULL) {
- LOG_ERR("%s: failed to create the llama_context\n" , __func__);
- return NULL;
- }
-
- auto * ctx_llava = (struct llava_context *)malloc(sizeof(llava_context));
-
- ctx_llava->ctx_llama = ctx_llama;
- ctx_llava->ctx_clip = ctx_clip;
- ctx_llava->model = model;
- return ctx_llava;
- }
-
- static void llava_free(struct llava_context * ctx_llava) {
- if (ctx_llava->ctx_clip) {
- clip_free(ctx_llava->ctx_clip);
- ctx_llava->ctx_clip = NULL;
- }
-
- llama_free(ctx_llava->ctx_llama);
- llama_model_free(ctx_llava->model);
- llama_backend_free();
- }
-
- int main(int argc, char ** argv) {
- ggml_time_init();
-
- common_params params;
-
- if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_LLAVA, print_usage)) {
- return 1;
- }
-
- common_init();
-
- if (params.mmproj.empty() || (params.image.empty() && !prompt_contains_image(params.prompt))) {
- print_usage(argc, argv);
- return 1;
- }
-
- auto * model = llava_init(&params);
- if (model == NULL) {
- fprintf(stderr, "%s: error: failed to init llava model\n", __func__);
- return 1;
- }
-
- if (prompt_contains_image(params.prompt)) {
- auto * ctx_llava = llava_init_context(&params, model);
-
- auto * image_embed = load_image(ctx_llava, &params, "");
-
- // process the prompt
- process_prompt(ctx_llava, image_embed, &params, params.prompt);
-
- llama_perf_context_print(ctx_llava->ctx_llama);
- llava_image_embed_free(image_embed);
- ctx_llava->model = NULL;
- llava_free(ctx_llava);
- } else {
- for (auto & image : params.image) {
- auto * ctx_llava = llava_init_context(&params, model);
-
- auto * image_embed = load_image(ctx_llava, &params, image);
- if (!image_embed) {
- LOG_ERR("%s: failed to load image %s. Terminating\n\n", __func__, image.c_str());
- return 1;
- }
-
- // process the prompt
- process_prompt(ctx_llava, image_embed, &params, params.prompt);
-
- llama_perf_context_print(ctx_llava->ctx_llama);
- llava_image_embed_free(image_embed);
- ctx_llava->model = NULL;
- llava_free(ctx_llava);
- }
- }
-
- llama_model_free(model);
-
- return 0;
- }
package/src/llama.cpp/examples/llava/minicpmv-cli.cpp (deleted)
@@ -1,354 +0,0 @@
- #include "arg.h"
- #include "log.h"
- #include "common.h"
- #include "sampling.h"
- #include "clip.h"
- #include "llava.h"
- #include "llama.h"
- #include "ggml.h"
-
- #include <algorithm>
- #include <cstdio>
- #include <cstdlib>
- #include <cstring>
- #include <vector>
- #include <iostream> // TODO: remove me
-
- struct llava_context {
- struct clip_ctx * ctx_clip = NULL;
- struct llama_context * ctx_llama = NULL;
- struct llama_model * model = NULL;
- };
-
- static void show_additional_info(int /*argc*/, char ** argv) {
- LOG("\nexample usage:\n\n%s -m <llava-v1.5-7b/ggml-model-q5_k.gguf> --mmproj <llava-v1.5-7b/mmproj-model-f16.gguf> --image <path/to/an/image.jpg> --image <path/to/another/image.jpg> [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]);
- LOG("\nnote: a lower temperature value like 0.1 is recommended for better quality.\n");
- }
-
- static struct llama_model * llava_init(common_params * params) {
- llama_backend_init();
- llama_numa_init(params->numa);
-
- llama_model_params model_params = common_model_params_to_llama(*params);
-
- llama_model * model = llama_model_load_from_file(params->model.c_str(), model_params);
- if (model == NULL) {
- LOG_ERR("%s: unable to load model\n" , __func__);
- return NULL;
- }
- return model;
- }
-
- static struct llava_context * llava_init_context(common_params * params, llama_model * model) {
- auto prompt = params->prompt;
- if (prompt.empty()) {
- prompt = "describe the image in detail.";
- }
-
- llama_context_params ctx_params = common_context_params_to_llama(*params);
- if (params->n_ctx < 2048) {
- // warn user here, "Image processing requires at least 2048 context, setting context to 2048"
- LOG_WRN("%s: Image processing requires at least 2048 context, setting context to 2048\n" , __func__);
- ctx_params.n_ctx = 2048;
- } else {
- ctx_params.n_ctx = params->n_ctx;
- }
-
- llama_context * ctx_llama = llama_init_from_model(model, ctx_params);
-
- if (ctx_llama == NULL) {
- LOG_ERR("%s: failed to create the llama_context\n" , __func__);
- return NULL;
- }
-
- auto * ctx_llava = (struct llava_context *)malloc(sizeof(llava_context));
-
- ctx_llava->ctx_llama = ctx_llama;
- ctx_llava->model = model;
- return ctx_llava;
- }
-
- static void llava_free(struct llava_context * ctx_llava) {
- if (ctx_llava->ctx_clip) {
- clip_free(ctx_llava->ctx_clip);
- ctx_llava->ctx_clip = NULL;
- }
-
- llama_free(ctx_llava->ctx_llama);
- llama_model_free(ctx_llava->model);
- llama_backend_free();
- }
-
- static struct clip_ctx * clip_init_context(common_params * params) {
- const char * clip_path = params->mmproj.c_str();
-
- auto prompt = params->prompt;
- if (prompt.empty()) {
- prompt = "describe the image in detail.";
- }
- struct clip_context_params clip_params = {
- /* use_gpu */ params->n_gpu_layers != 0,
- /* verbosity */ params->verbosity,
- };
- auto * ctx_clip = clip_init(clip_path, clip_params);
- return ctx_clip;
- }
-
- static bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_token> tokens, int n_batch, int * n_past) {
- int N = (int) tokens.size();
- for (int i = 0; i < N; i += n_batch) {
- int n_eval = (int) tokens.size() - i;
- if (n_eval > n_batch) {
- n_eval = n_batch;
- }
- if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval))) {
- LOG_ERR("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past);
- return false;
- }
- *n_past += n_eval;
- }
- return true;
- }
-
- static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) {
- std::vector<llama_token> tokens;
- tokens.push_back(id);
- return eval_tokens(ctx_llama, tokens, 1, n_past);
- }
-
- static bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos){
- std::string str2 = str;
- std::vector<llama_token> embd_inp = common_tokenize(ctx_llama, str2, add_bos, true);
- return eval_tokens(ctx_llama, embd_inp, n_batch, n_past);
- }
-
- static void process_eval_image_embed(struct llava_context * ctx_llava, const struct llava_image_embed * embeds, int n_batch, int * n_past, int idx) {
- float * image_embed = (float *)malloc(clip_embd_nbytes(ctx_llava->ctx_clip));
- std::memcpy(image_embed, embeds->embed + idx * clip_n_patches(ctx_llava->ctx_clip) * clip_n_mmproj_embd(ctx_llava->ctx_clip), clip_embd_nbytes(ctx_llava->ctx_clip));
-
- auto * slice_embed = (llava_image_embed*)malloc(sizeof(llava_image_embed));
- slice_embed->embed = image_embed;
- slice_embed->n_image_pos = clip_n_patches(ctx_llava->ctx_clip);
- llava_eval_image_embed(ctx_llava->ctx_llama, slice_embed, n_batch, n_past);
- llava_image_embed_free(slice_embed);
- }
-
- static void process_image(struct llava_context * ctx_llava, struct llava_image_embed * embeds, common_params * params, int &n_past) {
- std::string system_prompt;
- int idx = 0;
- int num_image_embeds = embeds->n_image_pos / clip_n_patches(ctx_llava->ctx_clip);
- int has_minicpmv_projector = clip_is_minicpmv(ctx_llava->ctx_clip);
- if (has_minicpmv_projector == 2) {
- system_prompt = "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n";
- }
- else if (has_minicpmv_projector == 3) {
- system_prompt = "<|im_start|>user\n";
- }
- else if (has_minicpmv_projector == 4) {
- system_prompt = "<|im_start|>user\n";
- }
- LOG_INF("%s: image token past: %d\n", __func__, n_past);
- eval_string(ctx_llava->ctx_llama, (system_prompt+"<image>").c_str(), params->n_batch, &n_past, false);
- process_eval_image_embed(ctx_llava, embeds, params->n_batch, &n_past, idx++);
- eval_string(ctx_llava->ctx_llama, std::string("</image>").c_str(), params->n_batch, &n_past, false);
- if (num_image_embeds > 1) {
- if (has_minicpmv_projector == 2) {
- size_t num_image_embeds_col = clip_uhd_num_image_embeds_col(ctx_llava->ctx_clip);
- eval_string(ctx_llava->ctx_llama, std::string("<slice>").c_str(), params->n_batch, &n_past, false);
- for (size_t i = 0; i < (num_image_embeds-1)/num_image_embeds_col; ++i) {
- for (size_t j = 0; j < num_image_embeds_col; ++j) {
- eval_string(ctx_llava->ctx_llama, std::string("<image>").c_str(), params->n_batch, &n_past, false);
- process_eval_image_embed(ctx_llava, embeds, params->n_batch, &n_past, idx++);
- eval_string(ctx_llava->ctx_llama, std::string("</image>").c_str(), params->n_batch, &n_past, false);
- if (j == num_image_embeds_col - 1) {
- eval_string(ctx_llava->ctx_llama, std::string("\n").c_str(), params->n_batch, &n_past, false);
- }
- }
- }
- eval_string(ctx_llava->ctx_llama, std::string("</slice>").c_str(), params->n_batch, &n_past, false);
- }
- else if (has_minicpmv_projector == 3 || has_minicpmv_projector == 4) {
- size_t num_image_embeds_col = clip_uhd_num_image_embeds_col(ctx_llava->ctx_clip);
- for (size_t i = 0; i < (num_image_embeds-1)/num_image_embeds_col; ++i) {
- for (size_t j = 0; j < num_image_embeds_col; ++j) {
- eval_string(ctx_llava->ctx_llama, std::string("<slice>").c_str(), params->n_batch, &n_past, false);
- process_eval_image_embed(ctx_llava, embeds, params->n_batch, &n_past, idx++);
- eval_string(ctx_llava->ctx_llama, std::string("</slice>").c_str(), params->n_batch, &n_past, false);
- if (j == num_image_embeds_col - 1) {
- eval_string(ctx_llava->ctx_llama, std::string("\n").c_str(), params->n_batch, &n_past, false);
- }
- }
- }
- }
- }
- LOG_INF("%s: image token past: %d\n", __func__, n_past);
- }
-
- static const char * sample(struct common_sampler * smpl,
- struct llama_context * ctx_llama,
- int * n_past) {
- const llama_token id = common_sampler_sample(smpl, ctx_llama, -1);
- common_sampler_accept(smpl, id, true);
-
- const llama_model * model = llama_get_model(ctx_llama);
- const llama_vocab * vocab = llama_model_get_vocab(model);
-
- static std::string ret;
- if (llama_vocab_is_eog(vocab, id)) {
- ret = "</s>";
- } else {
- ret = common_token_to_piece(ctx_llama, id);
- }
- eval_id(ctx_llama, id, n_past);
- return ret.c_str();
- }
-
- static struct llava_context * minicpmv_init(common_params * params, const std::string & fname, int &n_past){
- auto * ctx_clip = clip_init_context(params);
- auto * embeds = llava_image_embed_make_with_filename(ctx_clip, params->cpuparams.n_threads, fname.c_str());
- if (!embeds) {
- LOG_ERR("failed to load image %s. Terminating\n\n", fname.c_str());
- return NULL;
- }
-
- // process the prompt
- if (params->prompt.empty() && params->interactive == false) {
- LOG_ERR("prompt should be given or interactive mode should be on");
- return NULL;
- }
-
- auto * model = llava_init(params);
- if (model == NULL) {
- fprintf(stderr, "%s: error: failed to init minicpmv model\n", __func__);
- return NULL;
- }
- const int64_t t_llava_init_start_us = ggml_time_us();
- auto * ctx_llava = llava_init_context(params, model);
- ctx_llava->ctx_clip = ctx_clip;
- const int64_t t_llava_init_end_us = ggml_time_us();
- float t_llava_init_ms = (t_llava_init_end_us - t_llava_init_start_us) / 1000.0;
- LOG_INF("%s: llava init in %8.2f ms.\n", __func__, t_llava_init_ms);
-
- const int64_t t_process_image_start_us = ggml_time_us();
- process_image(ctx_llava, embeds, params, n_past);
- const int64_t t_process_image_end_us = ggml_time_us();
- float t_process_image_ms = (t_process_image_end_us - t_process_image_start_us) / 1000.0;
- LOG_INF("%s: llama process image in %8.2f ms.\n", __func__, t_process_image_ms);
-
- llava_image_embed_free(embeds);
- return ctx_llava;
- }
-
- static struct common_sampler * llama_init(struct llava_context * ctx_llava, common_params * params, const std::string & prompt, int & n_past, bool is_first = false){
- std::string user_prompt = prompt;
- int has_minicpmv_projector = clip_is_minicpmv(ctx_llava->ctx_clip);
- if (!is_first) {
- if (has_minicpmv_projector == 2) {
- user_prompt = "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n" + prompt;
- }
- else if (has_minicpmv_projector == 3) {
- user_prompt = "<|im_start|>user\n" + prompt;
- }
- else if (has_minicpmv_projector == 4) {
- user_prompt = "<|im_start|>user\n" + prompt;
- }
- }
-
- eval_string(ctx_llava->ctx_llama, user_prompt.c_str(), params->n_batch, &n_past, false);
- if (has_minicpmv_projector == 2) {
- eval_string(ctx_llava->ctx_llama, "<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", params->n_batch, &n_past, false);
- }
- else if (has_minicpmv_projector == 3) {
- eval_string(ctx_llava->ctx_llama, "<|im_end|><|im_start|>assistant\n", params->n_batch, &n_past, false);
- }
- else if (has_minicpmv_projector == 4) {
- eval_string(ctx_llava->ctx_llama, "<|im_end|><|im_start|>assistant\n", params->n_batch, &n_past, false);
- }
-
- // generate the response
-
- LOG_INF("\n");
-
- struct common_sampler * smpl = common_sampler_init(ctx_llava->model, params->sampling);
- return smpl;
- }
-
- static const char * llama_loop(struct llava_context * ctx_llava,struct common_sampler * smpl, int &n_past){
-
- const char * tmp = sample(smpl, ctx_llava->ctx_llama, &n_past);
- return tmp;
- }
-
- int main(int argc, char ** argv) {
- ggml_time_init();
-
- common_params params;
-
- if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_LLAVA, show_additional_info)) {
- return 1;
- }
-
- common_init();
-
- if (params.mmproj.empty() || (params.image.empty())) {
- show_additional_info(argc, argv);
- return 1;
- }
-
- for (auto & image : params.image) {
- int n_past = 0;
- auto * ctx_llava = minicpmv_init(&params, image, n_past);
-
- if (!params.prompt.empty()) {
- LOG("<user>%s\n", params.prompt.c_str());
- LOG("<assistant>");
- auto * smpl = llama_init(ctx_llava, &params, params.prompt, n_past, true);
- const int max_tgt_len = params.n_predict < 0 ? 256 : params.n_predict;
- std::string response;
- bool have_tmp = false;
- for (int i = 0; i < max_tgt_len; i++) {
- const auto * tmp = llama_loop(ctx_llava, smpl, n_past);
- response += tmp;
- if (strcmp(tmp, "</s>") == 0){
- if (!have_tmp) {
- continue;
- }
- break;
- }
- if (strstr(tmp, "###")) break; // Yi-VL behavior
- have_tmp = true;
- printf("%s", tmp);
- if (strstr(response.c_str(), "<user>")) break; // minicpm-v
-
- fflush(stdout);
- }
- common_sampler_free(smpl);
- }else {
- while (true) {
- LOG("<user>");
- std::string prompt;
- std::getline(std::cin, prompt);
- LOG("<assistant>");
- auto * smpl = llama_init(ctx_llava, &params, prompt, n_past, true);
- const int max_tgt_len = params.n_predict < 0 ? 256 : params.n_predict;
- std::string response;
- for (int i = 0; i < max_tgt_len; i++) {
- const auto * tmp = llama_loop(ctx_llava, smpl, n_past);
- response += tmp;
- if (strcmp(tmp, "</s>") == 0) break;
- printf("%s", tmp);// mistral llava-1.6
- if (strstr(response.c_str(), "<user>")) break; // minicpm-v
- fflush(stdout);
- }
- common_sampler_free(smpl);
- }
- }
- printf("\n");
- llama_perf_context_print(ctx_llava->ctx_llama);
-
- ctx_llava->model = NULL;
- llava_free(ctx_llava);
- }
-
- return 0;
- }
package/src/llama.cpp/examples/quantize-stats/CMakeLists.txt (deleted)
@@ -1,6 +0,0 @@
- set(TARGET llama-quantize-stats)
- add_executable(${TARGET} quantize-stats.cpp)
- install(TARGETS ${TARGET} RUNTIME)
- target_link_libraries(${TARGET} PRIVATE llama build_info ${CMAKE_THREAD_LIBS_INIT})
- target_include_directories(${TARGET} PRIVATE ../../common)
- target_compile_features(${TARGET} PRIVATE cxx_std_17)