@fugood/llama.node 0.0.1-alpha.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (204)
  1. package/CMakeLists.txt +85 -0
  2. package/README.md +56 -0
  3. package/bin/darwin/arm64/llama-node.node +0 -0
  4. package/bin/darwin/x64/llama-node.node +0 -0
  5. package/bin/linux/arm64/llama-node.node +0 -0
  6. package/bin/linux/x64/llama-node.node +0 -0
  7. package/bin/win32/arm64/llama-node.node +0 -0
  8. package/bin/win32/arm64/node.lib +0 -0
  9. package/bin/win32/x64/llama-node.node +0 -0
  10. package/bin/win32/x64/node.lib +0 -0
  11. package/lib/binding.js +13 -0
  12. package/lib/binding.ts +57 -0
  13. package/lib/index.js +24 -0
  14. package/lib/index.ts +13 -0
  15. package/package.json +65 -0
  16. package/src/addons.cpp +506 -0
  17. package/src/llama.cpp/CMakeLists.txt +1320 -0
  18. package/src/llama.cpp/build.zig +172 -0
  19. package/src/llama.cpp/cmake/FindSIMD.cmake +100 -0
  20. package/src/llama.cpp/common/CMakeLists.txt +87 -0
  21. package/src/llama.cpp/common/base64.hpp +392 -0
  22. package/src/llama.cpp/common/common.cpp +2949 -0
  23. package/src/llama.cpp/common/common.h +324 -0
  24. package/src/llama.cpp/common/console.cpp +501 -0
  25. package/src/llama.cpp/common/console.h +19 -0
  26. package/src/llama.cpp/common/grammar-parser.cpp +440 -0
  27. package/src/llama.cpp/common/grammar-parser.h +29 -0
  28. package/src/llama.cpp/common/json-schema-to-grammar.cpp +764 -0
  29. package/src/llama.cpp/common/json-schema-to-grammar.h +4 -0
  30. package/src/llama.cpp/common/json.hpp +24766 -0
  31. package/src/llama.cpp/common/log.h +724 -0
  32. package/src/llama.cpp/common/ngram-cache.cpp +282 -0
  33. package/src/llama.cpp/common/ngram-cache.h +94 -0
  34. package/src/llama.cpp/common/sampling.cpp +353 -0
  35. package/src/llama.cpp/common/sampling.h +147 -0
  36. package/src/llama.cpp/common/stb_image.h +8396 -0
  37. package/src/llama.cpp/common/train.cpp +1513 -0
  38. package/src/llama.cpp/common/train.h +233 -0
  39. package/src/llama.cpp/examples/CMakeLists.txt +52 -0
  40. package/src/llama.cpp/examples/baby-llama/CMakeLists.txt +5 -0
  41. package/src/llama.cpp/examples/baby-llama/baby-llama.cpp +1640 -0
  42. package/src/llama.cpp/examples/batched/CMakeLists.txt +5 -0
  43. package/src/llama.cpp/examples/batched/batched.cpp +262 -0
  44. package/src/llama.cpp/examples/batched-bench/CMakeLists.txt +5 -0
  45. package/src/llama.cpp/examples/batched-bench/batched-bench.cpp +261 -0
  46. package/src/llama.cpp/examples/beam-search/CMakeLists.txt +5 -0
  47. package/src/llama.cpp/examples/beam-search/beam-search.cpp +188 -0
  48. package/src/llama.cpp/examples/benchmark/CMakeLists.txt +6 -0
  49. package/src/llama.cpp/examples/benchmark/benchmark-matmult.cpp +275 -0
  50. package/src/llama.cpp/examples/convert-llama2c-to-ggml/CMakeLists.txt +5 -0
  51. package/src/llama.cpp/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +936 -0
  52. package/src/llama.cpp/examples/embedding/CMakeLists.txt +5 -0
  53. package/src/llama.cpp/examples/embedding/embedding.cpp +211 -0
  54. package/src/llama.cpp/examples/eval-callback/CMakeLists.txt +9 -0
  55. package/src/llama.cpp/examples/eval-callback/eval-callback.cpp +195 -0
  56. package/src/llama.cpp/examples/export-lora/CMakeLists.txt +5 -0
  57. package/src/llama.cpp/examples/export-lora/export-lora.cpp +462 -0
  58. package/src/llama.cpp/examples/finetune/CMakeLists.txt +5 -0
  59. package/src/llama.cpp/examples/finetune/finetune.cpp +1861 -0
  60. package/src/llama.cpp/examples/gbnf-validator/CMakeLists.txt +5 -0
  61. package/src/llama.cpp/examples/gbnf-validator/gbnf-validator.cpp +132 -0
  62. package/src/llama.cpp/examples/gguf/CMakeLists.txt +5 -0
  63. package/src/llama.cpp/examples/gguf/gguf.cpp +256 -0
  64. package/src/llama.cpp/examples/gguf-split/CMakeLists.txt +5 -0
  65. package/src/llama.cpp/examples/gguf-split/gguf-split.cpp +553 -0
  66. package/src/llama.cpp/examples/gritlm/CMakeLists.txt +5 -0
  67. package/src/llama.cpp/examples/gritlm/gritlm.cpp +215 -0
  68. package/src/llama.cpp/examples/imatrix/CMakeLists.txt +5 -0
  69. package/src/llama.cpp/examples/imatrix/imatrix.cpp +655 -0
  70. package/src/llama.cpp/examples/infill/CMakeLists.txt +5 -0
  71. package/src/llama.cpp/examples/infill/infill.cpp +767 -0
  72. package/src/llama.cpp/examples/jeopardy/questions.txt +100 -0
  73. package/src/llama.cpp/examples/llama-bench/CMakeLists.txt +5 -0
  74. package/src/llama.cpp/examples/llama-bench/llama-bench.cpp +1286 -0
  75. package/src/llama.cpp/examples/llama.android/app/src/main/cpp/CMakeLists.txt +50 -0
  76. package/src/llama.cpp/examples/llama.android/app/src/main/cpp/llama-android.cpp +443 -0
  77. package/src/llama.cpp/examples/llava/CMakeLists.txt +37 -0
  78. package/src/llama.cpp/examples/llava/clip.cpp +2027 -0
  79. package/src/llama.cpp/examples/llava/clip.h +85 -0
  80. package/src/llama.cpp/examples/llava/llava-cli.cpp +309 -0
  81. package/src/llama.cpp/examples/llava/llava.cpp +426 -0
  82. package/src/llama.cpp/examples/llava/llava.h +50 -0
  83. package/src/llama.cpp/examples/llava/requirements.txt +3 -0
  84. package/src/llama.cpp/examples/lookahead/CMakeLists.txt +5 -0
  85. package/src/llama.cpp/examples/lookahead/lookahead.cpp +485 -0
  86. package/src/llama.cpp/examples/lookup/CMakeLists.txt +23 -0
  87. package/src/llama.cpp/examples/lookup/lookup-create.cpp +41 -0
  88. package/src/llama.cpp/examples/lookup/lookup-merge.cpp +47 -0
  89. package/src/llama.cpp/examples/lookup/lookup-stats.cpp +160 -0
  90. package/src/llama.cpp/examples/lookup/lookup.cpp +258 -0
  91. package/src/llama.cpp/examples/main/CMakeLists.txt +5 -0
  92. package/src/llama.cpp/examples/main/main.cpp +957 -0
  93. package/src/llama.cpp/examples/main-cmake-pkg/CMakeLists.txt +33 -0
  94. package/src/llama.cpp/examples/parallel/CMakeLists.txt +5 -0
  95. package/src/llama.cpp/examples/parallel/parallel.cpp +427 -0
  96. package/src/llama.cpp/examples/passkey/CMakeLists.txt +5 -0
  97. package/src/llama.cpp/examples/passkey/passkey.cpp +302 -0
  98. package/src/llama.cpp/examples/perplexity/CMakeLists.txt +5 -0
  99. package/src/llama.cpp/examples/perplexity/perplexity.cpp +1943 -0
  100. package/src/llama.cpp/examples/quantize/CMakeLists.txt +6 -0
  101. package/src/llama.cpp/examples/quantize/quantize.cpp +423 -0
  102. package/src/llama.cpp/examples/quantize-stats/CMakeLists.txt +6 -0
  103. package/src/llama.cpp/examples/quantize-stats/quantize-stats.cpp +424 -0
  104. package/src/llama.cpp/examples/retrieval/CMakeLists.txt +5 -0
  105. package/src/llama.cpp/examples/retrieval/retrieval.cpp +350 -0
  106. package/src/llama.cpp/examples/save-load-state/CMakeLists.txt +5 -0
  107. package/src/llama.cpp/examples/save-load-state/save-load-state.cpp +246 -0
  108. package/src/llama.cpp/examples/server/CMakeLists.txt +40 -0
  109. package/src/llama.cpp/examples/server/bench/requirements.txt +2 -0
  110. package/src/llama.cpp/examples/server/httplib.h +9465 -0
  111. package/src/llama.cpp/examples/server/server.cpp +3826 -0
  112. package/src/llama.cpp/examples/server/tests/requirements.txt +6 -0
  113. package/src/llama.cpp/examples/server/utils.hpp +653 -0
  114. package/src/llama.cpp/examples/simple/CMakeLists.txt +5 -0
  115. package/src/llama.cpp/examples/simple/simple.cpp +183 -0
  116. package/src/llama.cpp/examples/speculative/CMakeLists.txt +5 -0
  117. package/src/llama.cpp/examples/speculative/speculative.cpp +614 -0
  118. package/src/llama.cpp/examples/sycl/CMakeLists.txt +9 -0
  119. package/src/llama.cpp/examples/sycl/ls-sycl-device.cpp +13 -0
  120. package/src/llama.cpp/examples/tokenize/CMakeLists.txt +5 -0
  121. package/src/llama.cpp/examples/tokenize/tokenize.cpp +42 -0
  122. package/src/llama.cpp/examples/train-text-from-scratch/CMakeLists.txt +5 -0
  123. package/src/llama.cpp/examples/train-text-from-scratch/train-text-from-scratch.cpp +1252 -0
  124. package/src/llama.cpp/ggml-alloc.c +985 -0
  125. package/src/llama.cpp/ggml-alloc.h +76 -0
  126. package/src/llama.cpp/ggml-backend-impl.h +141 -0
  127. package/src/llama.cpp/ggml-backend.c +2099 -0
  128. package/src/llama.cpp/ggml-backend.h +233 -0
  129. package/src/llama.cpp/ggml-common.h +1853 -0
  130. package/src/llama.cpp/ggml-cuda.h +43 -0
  131. package/src/llama.cpp/ggml-impl.h +265 -0
  132. package/src/llama.cpp/ggml-kompute.cpp +2006 -0
  133. package/src/llama.cpp/ggml-kompute.h +46 -0
  134. package/src/llama.cpp/ggml-metal.h +66 -0
  135. package/src/llama.cpp/ggml-mpi.c +216 -0
  136. package/src/llama.cpp/ggml-mpi.h +39 -0
  137. package/src/llama.cpp/ggml-opencl.cpp +2301 -0
  138. package/src/llama.cpp/ggml-opencl.h +36 -0
  139. package/src/llama.cpp/ggml-quants.c +12678 -0
  140. package/src/llama.cpp/ggml-quants.h +133 -0
  141. package/src/llama.cpp/ggml-sycl.cpp +17882 -0
  142. package/src/llama.cpp/ggml-sycl.h +49 -0
  143. package/src/llama.cpp/ggml-vulkan-shaders.hpp +69849 -0
  144. package/src/llama.cpp/ggml-vulkan.cpp +6442 -0
  145. package/src/llama.cpp/ggml-vulkan.h +29 -0
  146. package/src/llama.cpp/ggml.c +21819 -0
  147. package/src/llama.cpp/ggml.h +2403 -0
  148. package/src/llama.cpp/llama.cpp +17468 -0
  149. package/src/llama.cpp/llama.h +1117 -0
  150. package/src/llama.cpp/pocs/CMakeLists.txt +12 -0
  151. package/src/llama.cpp/pocs/vdot/CMakeLists.txt +9 -0
  152. package/src/llama.cpp/pocs/vdot/q8dot.cpp +172 -0
  153. package/src/llama.cpp/pocs/vdot/vdot.cpp +310 -0
  154. package/src/llama.cpp/prompts/LLM-questions.txt +49 -0
  155. package/src/llama.cpp/prompts/alpaca.txt +1 -0
  156. package/src/llama.cpp/prompts/assistant.txt +31 -0
  157. package/src/llama.cpp/prompts/chat-with-baichuan.txt +4 -0
  158. package/src/llama.cpp/prompts/chat-with-bob.txt +7 -0
  159. package/src/llama.cpp/prompts/chat-with-qwen.txt +1 -0
  160. package/src/llama.cpp/prompts/chat-with-vicuna-v0.txt +7 -0
  161. package/src/llama.cpp/prompts/chat-with-vicuna-v1.txt +7 -0
  162. package/src/llama.cpp/prompts/chat.txt +28 -0
  163. package/src/llama.cpp/prompts/dan-modified.txt +1 -0
  164. package/src/llama.cpp/prompts/dan.txt +1 -0
  165. package/src/llama.cpp/prompts/mnemonics.txt +93 -0
  166. package/src/llama.cpp/prompts/parallel-questions.txt +43 -0
  167. package/src/llama.cpp/prompts/reason-act.txt +18 -0
  168. package/src/llama.cpp/requirements/requirements-convert-hf-to-gguf.txt +3 -0
  169. package/src/llama.cpp/requirements/requirements-convert-llama-ggml-to-gguf.txt +1 -0
  170. package/src/llama.cpp/requirements/requirements-convert-lora-to-ggml.txt +2 -0
  171. package/src/llama.cpp/requirements/requirements-convert-persimmon-to-gguf.txt +2 -0
  172. package/src/llama.cpp/requirements/requirements-convert.txt +5 -0
  173. package/src/llama.cpp/requirements.txt +12 -0
  174. package/src/llama.cpp/scripts/gen-build-info-cpp.cmake +24 -0
  175. package/src/llama.cpp/scripts/xxd.cmake +16 -0
  176. package/src/llama.cpp/sgemm.cpp +999 -0
  177. package/src/llama.cpp/sgemm.h +12 -0
  178. package/src/llama.cpp/tests/CMakeLists.txt +78 -0
  179. package/src/llama.cpp/tests/get-model.cpp +21 -0
  180. package/src/llama.cpp/tests/get-model.h +2 -0
  181. package/src/llama.cpp/tests/test-autorelease.cpp +24 -0
  182. package/src/llama.cpp/tests/test-backend-ops.cpp +2266 -0
  183. package/src/llama.cpp/tests/test-c.c +7 -0
  184. package/src/llama.cpp/tests/test-chat-template.cpp +107 -0
  185. package/src/llama.cpp/tests/test-double-float.cpp +57 -0
  186. package/src/llama.cpp/tests/test-grad0.cpp +1606 -0
  187. package/src/llama.cpp/tests/test-grammar-integration.cpp +243 -0
  188. package/src/llama.cpp/tests/test-grammar-parser.cpp +250 -0
  189. package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +899 -0
  190. package/src/llama.cpp/tests/test-llama-grammar.cpp +402 -0
  191. package/src/llama.cpp/tests/test-model-load-cancel.cpp +27 -0
  192. package/src/llama.cpp/tests/test-opt.cpp +181 -0
  193. package/src/llama.cpp/tests/test-quantize-fns.cpp +185 -0
  194. package/src/llama.cpp/tests/test-quantize-perf.cpp +363 -0
  195. package/src/llama.cpp/tests/test-rope.cpp +221 -0
  196. package/src/llama.cpp/tests/test-sampling.cpp +301 -0
  197. package/src/llama.cpp/tests/test-tokenizer-0-falcon.cpp +187 -0
  198. package/src/llama.cpp/tests/test-tokenizer-0-llama.cpp +190 -0
  199. package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp +123 -0
  200. package/src/llama.cpp/tests/test-tokenizer-1-llama.cpp +111 -0
  201. package/src/llama.cpp/unicode-data.cpp +1651 -0
  202. package/src/llama.cpp/unicode-data.h +16 -0
  203. package/src/llama.cpp/unicode.cpp +277 -0
  204. package/src/llama.cpp/unicode.h +28 -0
package/src/llama.cpp/examples/llava/clip.h
@@ -0,0 +1,85 @@
+ #ifndef CLIP_H
+ #define CLIP_H
+
+ #include <stddef.h>
+ #include <stdint.h>
+
+ #ifdef LLAMA_SHARED
+ #    if defined(_WIN32) && !defined(__MINGW32__)
+ #        ifdef LLAMA_BUILD
+ #            define CLIP_API __declspec(dllexport)
+ #        else
+ #            define CLIP_API __declspec(dllimport)
+ #        endif
+ #    else
+ #        define CLIP_API __attribute__ ((visibility ("default")))
+ #    endif
+ #else
+ #    define CLIP_API
+ #endif
+
+ struct clip_ctx;
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ struct clip_ctx;
+
+ struct clip_image_u8_batch {
+     struct clip_image_u8 * data;
+     size_t size;
+ };
+
+ struct clip_image_f32_batch {
+     struct clip_image_f32 * data;
+     size_t size;
+ };
+
+ CLIP_API struct clip_ctx * clip_model_load    (const char * fname, int verbosity);
+ CLIP_API struct clip_ctx * clip_model_load_cpu(const char * fname, int verbosity);
+
+ CLIP_API void clip_free(struct clip_ctx * ctx);
+
+ CLIP_API size_t clip_embd_nbytes(const struct clip_ctx * ctx);
+
+ CLIP_API int32_t clip_image_size (const struct clip_ctx * ctx);
+ CLIP_API int32_t clip_patch_size (const struct clip_ctx * ctx);
+ CLIP_API int32_t clip_hidden_size(const struct clip_ctx * ctx);
+
+ // TODO: should be enum, not string
+ CLIP_API const char * clip_patch_merge_type(const struct clip_ctx * ctx);
+
+ CLIP_API const int32_t * clip_image_grid(const struct clip_ctx * ctx);
+
+ CLIP_API int clip_n_patches    (const struct clip_ctx * ctx);
+ CLIP_API int clip_n_mmproj_embd(const struct clip_ctx * ctx);
+
+ CLIP_API struct clip_image_u8  * clip_image_u8_init ();
+ CLIP_API struct clip_image_f32 * clip_image_f32_init();
+
+ CLIP_API void clip_image_u8_free (struct clip_image_u8  * img);
+ CLIP_API void clip_image_f32_free(struct clip_image_f32 * img);
+ CLIP_API void clip_image_u8_batch_free (struct clip_image_u8_batch  * batch);
+ CLIP_API void clip_image_f32_batch_free(struct clip_image_f32_batch * batch);
+
+ CLIP_API bool clip_image_load_from_file(const char * fname, struct clip_image_u8 * img);
+
+ /** interpret bytes as an image file with length bytes_length, and use the result to populate img */
+ CLIP_API bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img);
+
+ /** preprocess img and store the result in res_imgs, pad_to_square may be overriden to false depending on model configuration */
+ CLIP_API bool clip_image_preprocess(struct clip_ctx * ctx, const struct clip_image_u8 * img, struct clip_image_f32_batch * res_imgs);
+
+ CLIP_API struct ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx);
+
+ CLIP_API bool clip_image_encode      (struct clip_ctx * ctx, int n_threads, struct clip_image_f32 * img, float * vec);
+ CLIP_API bool clip_image_batch_encode(struct clip_ctx * ctx, int n_threads, const struct clip_image_f32_batch * imgs, float * vec);
+
+ CLIP_API bool clip_model_quantize(const char * fname_inp, const char * fname_out, int itype);
+
+ #ifdef __cplusplus
+ }
+ #endif
+
+ #endif // CLIP_H
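
The header above is a plain C API: load a CLIP/mmproj model, load and preprocess an image, then encode it into an embedding buffer. The following minimal usage sketch is not part of the package; the file paths and thread count are placeholders, and the buffer sizing assumes one embedding of clip_embd_nbytes() bytes per preprocessed image.

    // Minimal usage sketch for the clip.h API above; not shipped in this package.
    // "mmproj-model-f16.gguf" and "input.jpg" are placeholder paths, and the
    // thread count is arbitrary.
    #include "clip.h"

    #include <cstdio>
    #include <vector>

    int main() {
        struct clip_ctx * ctx = clip_model_load("mmproj-model-f16.gguf", /*verbosity=*/ 1);
        if (!ctx) {
            fprintf(stderr, "failed to load CLIP model\n");
            return 1;
        }

        struct clip_image_u8 * img = clip_image_u8_init();
        if (!clip_image_load_from_file("input.jpg", img)) {
            fprintf(stderr, "failed to load image\n");
            return 1;
        }

        // preprocessing may produce several f32 images (e.g. tiles), hence a batch
        struct clip_image_f32_batch batch = { /*data=*/ NULL, /*size=*/ 0 };
        if (!clip_image_preprocess(ctx, img, &batch)) {
            fprintf(stderr, "failed to preprocess image\n");
            return 1;
        }

        // one embedding of clip_embd_nbytes(ctx) bytes per preprocessed image
        std::vector<float> embd(batch.size * clip_embd_nbytes(ctx) / sizeof(float));
        clip_image_batch_encode(ctx, /*n_threads=*/ 4, &batch, embd.data());

        clip_image_f32_batch_free(&batch);
        clip_image_u8_free(img);
        clip_free(ctx);
        return 0;
    }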
package/src/llama.cpp/examples/llava/llava-cli.cpp
@@ -0,0 +1,309 @@
+ #include "ggml.h"
+ #include "log.h"
+ #include "common.h"
+ #include "clip.h"
+ #include "llava.h"
+ #include "llama.h"
+
+ #include "base64.hpp"
+
+ #include <cstdio>
+ #include <cstdlib>
+ #include <vector>
+
+ static bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_token> tokens, int n_batch, int * n_past) {
+     int N = (int) tokens.size();
+     for (int i = 0; i < N; i += n_batch) {
+         int n_eval = (int) tokens.size() - i;
+         if (n_eval > n_batch) {
+             n_eval = n_batch;
+         }
+         if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval, *n_past, 0))) {
+             LOG_TEE("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past);
+             return false;
+         }
+         *n_past += n_eval;
+     }
+     return true;
+ }
+
+ static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) {
+     std::vector<llama_token> tokens;
+     tokens.push_back(id);
+     return eval_tokens(ctx_llama, tokens, 1, n_past);
+ }
+
+ static bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos){
+     std::string str2 = str;
+     std::vector<llama_token> embd_inp = ::llama_tokenize(ctx_llama, str2, add_bos, true);
+     eval_tokens(ctx_llama, embd_inp, n_batch, n_past);
+     return true;
+ }
+
+ static const char * sample(struct llama_sampling_context * ctx_sampling,
+                            struct llama_context * ctx_llama,
+                            int * n_past) {
+     const llama_token id = llama_sampling_sample(ctx_sampling, ctx_llama, NULL);
+     llama_sampling_accept(ctx_sampling, ctx_llama, id, true);
+     static std::string ret;
+     if (llama_token_is_eog(llama_get_model(ctx_llama), id)) {
+         ret = "</s>";
+     } else {
+         ret = llama_token_to_piece(ctx_llama, id);
+     }
+     eval_id(ctx_llama, id, n_past);
+     return ret.c_str();
+ }
+
+ static const char* IMG_BASE64_TAG_BEGIN = "<img src=\"data:image/jpeg;base64,";
+ static const char* IMG_BASE64_TAG_END = "\">";
+
+ static void find_image_tag_in_prompt(const std::string& prompt, size_t& begin_out, size_t& end_out) {
+     begin_out = prompt.find(IMG_BASE64_TAG_BEGIN);
+     end_out = prompt.find(IMG_BASE64_TAG_END, (begin_out == std::string::npos) ? 0UL : begin_out);
+ }
+
+ static bool prompt_contains_image(const std::string& prompt) {
+     size_t begin, end;
+     find_image_tag_in_prompt(prompt, begin, end);
+     return (begin != std::string::npos);
+ }
+
+ // replaces the base64 image tag in the prompt with `replacement`
+ static llava_image_embed * llava_image_embed_make_with_prompt_base64(struct clip_ctx * ctx_clip, int n_threads, const std::string& prompt) {
+     size_t img_base64_str_start, img_base64_str_end;
+     find_image_tag_in_prompt(prompt, img_base64_str_start, img_base64_str_end);
+     if (img_base64_str_start == std::string::npos || img_base64_str_end == std::string::npos) {
+         LOG_TEE("%s: invalid base64 image tag. must be %s<base64 byte string>%s\n", __func__, IMG_BASE64_TAG_BEGIN, IMG_BASE64_TAG_END);
+         return NULL;
+     }
+
+     auto base64_bytes_start = img_base64_str_start + strlen(IMG_BASE64_TAG_BEGIN);
+     auto base64_bytes_count = img_base64_str_end - base64_bytes_start;
+     auto base64_str = prompt.substr(base64_bytes_start, base64_bytes_count);
+
+     auto required_bytes = base64::required_encode_size(base64_str.size());
+     auto img_bytes = std::vector<unsigned char>(required_bytes);
+     base64::decode(base64_str.begin(), base64_str.end(), img_bytes.begin());
+
+     auto embed = llava_image_embed_make_with_bytes(ctx_clip, n_threads, img_bytes.data(), img_bytes.size());
+     if (!embed) {
+         LOG_TEE("%s: could not load image from base64 string.\n", __func__);
+         return NULL;
+     }
+
+     return embed;
+ }
+
+ static std::string remove_image_from_prompt(const std::string& prompt, const char * replacement = "") {
+     size_t begin, end;
+     find_image_tag_in_prompt(prompt, begin, end);
+     if (begin == std::string::npos || end == std::string::npos) {
+         return prompt;
+     }
+     auto pre = prompt.substr(0, begin);
+     auto post = prompt.substr(end + strlen(IMG_BASE64_TAG_END));
+     return pre + replacement + post;
+ }
+
+ struct llava_context {
+     struct clip_ctx * ctx_clip = NULL;
+     struct llama_context * ctx_llama = NULL;
+     struct llama_model * model = NULL;
+ };
+
+ static void show_additional_info(int /*argc*/, char ** argv) {
+     LOG_TEE("\n example usage: %s -m <llava-v1.5-7b/ggml-model-q5_k.gguf> --mmproj <llava-v1.5-7b/mmproj-model-f16.gguf> --image <path/to/an/image.jpg> [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]);
+     LOG_TEE(" note: a lower temperature value like 0.1 is recommended for better quality.\n");
+ }
+
+ static struct llava_image_embed * load_image(llava_context * ctx_llava, gpt_params * params) {
+
+     // load and preprocess the image
+     llava_image_embed * embed = NULL;
+     auto prompt = params->prompt;
+     if (prompt_contains_image(prompt)) {
+         if (!params->image.empty()) {
+             LOG_TEE("using base64 encoded image instead of command line image path\n");
+         }
+         embed = llava_image_embed_make_with_prompt_base64(ctx_llava->ctx_clip, params->n_threads, prompt);
+         if (!embed) {
+             LOG_TEE("%s: can't load image from prompt\n", __func__);
+             return NULL;
+         }
+         params->prompt = remove_image_from_prompt(prompt);
+     } else {
+         embed = llava_image_embed_make_with_filename(ctx_llava->ctx_clip, params->n_threads, params->image.c_str());
+         if (!embed) {
+             LOG_TEE("%s: is %s really an image file?\n", __func__, params->image.c_str());
+             return NULL;
+         }
+     }
+
+     return embed;
+ }
+
+ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_embed * image_embed, gpt_params * params, const std::string & prompt) {
+     int n_past = 0;
+
+     const int max_tgt_len = params->n_predict < 0 ? 256 : params->n_predict;
+
+     std::string system_prompt, user_prompt;
+     size_t image_pos = prompt.find("<image>");
+     if (image_pos != std::string::npos) {
+         // new templating mode: Provide the full prompt including system message and use <image> as a placeholder for the image
+         system_prompt = prompt.substr(0, image_pos);
+         user_prompt = prompt.substr(image_pos + std::string("<image>").length());
+         LOG_TEE("system_prompt: %s\n", system_prompt.c_str());
+         if (params->verbose_prompt) {
+             auto tmp = ::llama_tokenize(ctx_llava->ctx_llama, system_prompt, true, true);
+             for (int i = 0; i < (int) tmp.size(); i++) {
+                 LOG_TEE("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
+             }
+         }
+         LOG_TEE("user_prompt: %s\n", user_prompt.c_str());
+         if (params->verbose_prompt) {
+             auto tmp = ::llama_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
+             for (int i = 0; i < (int) tmp.size(); i++) {
+                 LOG_TEE("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
+             }
+         }
+     } else {
+         // llava-1.5 native mode
+         system_prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER:";
+         user_prompt = prompt + "\nASSISTANT:";
+         if (params->verbose_prompt) {
+             auto tmp = ::llama_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
+             for (int i = 0; i < (int) tmp.size(); i++) {
+                 LOG_TEE("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
+             }
+         }
+     }
+
+     eval_string(ctx_llava->ctx_llama, system_prompt.c_str(), params->n_batch, &n_past, true);
+     llava_eval_image_embed(ctx_llava->ctx_llama, image_embed, params->n_batch, &n_past);
+     eval_string(ctx_llava->ctx_llama, user_prompt.c_str(), params->n_batch, &n_past, false);
+
+     // generate the response
+
+     LOG_TEE("\n");
+
+     struct llama_sampling_context * ctx_sampling = llama_sampling_init(params->sparams);
+     std::string response = "";
+     for (int i = 0; i < max_tgt_len; i++) {
+         const char * tmp = sample(ctx_sampling, ctx_llava->ctx_llama, &n_past);
+         response += tmp;
+         if (strcmp(tmp, "</s>") == 0) break;
+         if (strstr(tmp, "###")) break; // Yi-VL behavior
+         printf("%s", tmp);
+         if (strstr(response.c_str(), "<|im_end|>")) break; // Yi-34B llava-1.6 - for some reason those decode not as the correct token (tokenizer works)
+         if (strstr(response.c_str(), "<|im_start|>")) break; // Yi-34B llava-1.6
+         if (strstr(response.c_str(), "USER:")) break; // mistral llava-1.6
+
+         fflush(stdout);
+     }
+
+     llama_sampling_free(ctx_sampling);
+     printf("\n");
+ }
+
+
+ static struct llava_context * llava_init(gpt_params * params) {
+     const char * clip_path = params->mmproj.c_str();
+
+     auto prompt = params->prompt;
+     if (prompt.empty()) {
+         prompt = "describe the image in detail.";
+     }
+
+     auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1);
+
+     llama_backend_init();
+     llama_numa_init(params->numa);
+
+     llama_model_params model_params = llama_model_params_from_gpt_params(*params);
+
+     llama_model * model = llama_load_model_from_file(params->model.c_str(), model_params);
+     if (model == NULL) {
+         LOG_TEE("%s: error: unable to load model\n", __func__);
+         return NULL;
+     }
+
+     llama_context_params ctx_params = llama_context_params_from_gpt_params(*params);
+     ctx_params.n_ctx = params->n_ctx < 2048 ? 2048 : params->n_ctx; // we need a longer context size to process image embeddings
+
+     llama_context * ctx_llama = llama_new_context_with_model(model, ctx_params);
+
+     if (ctx_llama == NULL) {
+         LOG_TEE("%s: error: failed to create the llama_context\n", __func__);
+         return NULL;
+     }
+
+     auto ctx_llava = (struct llava_context *) malloc(sizeof(llava_context));
+
+     ctx_llava->ctx_llama = ctx_llama;
+     ctx_llava->ctx_clip = ctx_clip;
+     ctx_llava->model = model;
+     return ctx_llava;
+ }
+
+ static void llava_free(struct llava_context * ctx_llava) {
+     if (ctx_llava->ctx_clip) {
+         clip_free(ctx_llava->ctx_clip);
+         ctx_llava->ctx_clip = NULL;
+     }
+
+     llama_free(ctx_llava->ctx_llama);
+     llama_free_model(ctx_llava->model);
+     llama_backend_free();
+ }
+
+ static void llama_log_callback_logTee(ggml_log_level level, const char * text, void * user_data) {
+     (void) level;
+     (void) user_data;
+     LOG_TEE("%s", text);
+ }
+
+ int main(int argc, char ** argv) {
+     ggml_time_init();
+
+     gpt_params params;
+
+     if (!gpt_params_parse(argc, argv, params)) {
+         show_additional_info(argc, argv);
+         return 1;
+     }
+
+ #ifndef LOG_DISABLE_LOGS
+     log_set_target(log_filename_generator("llava", "log"));
+     LOG_TEE("Log start\n");
+     log_dump_cmdline(argc, argv);
+     llama_log_set(llama_log_callback_logTee, nullptr);
+ #endif // LOG_DISABLE_LOGS
+
+     if (params.mmproj.empty() || (params.image.empty() && !prompt_contains_image(params.prompt))) {
+         gpt_print_usage(argc, argv, params);
+         show_additional_info(argc, argv);
+         return 1;
+     }
+
+     auto ctx_llava = llava_init(&params);
+     if (ctx_llava == NULL) {
+         LOG_TEE("%s: error: failed to init llava\n", __func__);
+         return 1;
+     }
+
+     auto image_embed = load_image(ctx_llava, &params);
+     if (!image_embed) {
+         return 1;
+     }
+
+     // process the prompt
+     process_prompt(ctx_llava, image_embed, &params, params.prompt);
+
+     llama_print_timings(ctx_llava->ctx_llama);
+
+     llava_image_embed_free(image_embed);
+     llava_free(ctx_llava);
+     return 0;
+ }
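
As the IMG_BASE64_TAG_* constants above show, llava-cli also accepts the image inline in the prompt as a base64 data URI instead of a --image path. A small sketch of assembling such a prompt follows; it assumes the caller already holds the base64-encoded JPEG bytes, the helper name is hypothetical, and only the tag strings come from the source.

    #include <string>

    // Hypothetical helper, not part of the package: wraps base64-encoded JPEG
    // bytes in the tag that find_image_tag_in_prompt() searches for.
    static std::string make_image_prompt(const std::string & jpeg_base64, const std::string & question) {
        std::string prompt;
        prompt += "<img src=\"data:image/jpeg;base64,"; // IMG_BASE64_TAG_BEGIN
        prompt += jpeg_base64;                          // base64 payload
        prompt += "\">";                                // IMG_BASE64_TAG_END
        prompt += "\n" + question;                      // the textual request
        return prompt;
    }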