@fugood/llama.node 0.0.1-alpha.1

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (204)
  1. package/CMakeLists.txt +85 -0
  2. package/README.md +56 -0
  3. package/bin/darwin/arm64/llama-node.node +0 -0
  4. package/bin/darwin/x64/llama-node.node +0 -0
  5. package/bin/linux/arm64/llama-node.node +0 -0
  6. package/bin/linux/x64/llama-node.node +0 -0
  7. package/bin/win32/arm64/llama-node.node +0 -0
  8. package/bin/win32/arm64/node.lib +0 -0
  9. package/bin/win32/x64/llama-node.node +0 -0
  10. package/bin/win32/x64/node.lib +0 -0
  11. package/lib/binding.js +13 -0
  12. package/lib/binding.ts +57 -0
  13. package/lib/index.js +24 -0
  14. package/lib/index.ts +13 -0
  15. package/package.json +65 -0
  16. package/src/addons.cpp +506 -0
  17. package/src/llama.cpp/CMakeLists.txt +1320 -0
  18. package/src/llama.cpp/build.zig +172 -0
  19. package/src/llama.cpp/cmake/FindSIMD.cmake +100 -0
  20. package/src/llama.cpp/common/CMakeLists.txt +87 -0
  21. package/src/llama.cpp/common/base64.hpp +392 -0
  22. package/src/llama.cpp/common/common.cpp +2949 -0
  23. package/src/llama.cpp/common/common.h +324 -0
  24. package/src/llama.cpp/common/console.cpp +501 -0
  25. package/src/llama.cpp/common/console.h +19 -0
  26. package/src/llama.cpp/common/grammar-parser.cpp +440 -0
  27. package/src/llama.cpp/common/grammar-parser.h +29 -0
  28. package/src/llama.cpp/common/json-schema-to-grammar.cpp +764 -0
  29. package/src/llama.cpp/common/json-schema-to-grammar.h +4 -0
  30. package/src/llama.cpp/common/json.hpp +24766 -0
  31. package/src/llama.cpp/common/log.h +724 -0
  32. package/src/llama.cpp/common/ngram-cache.cpp +282 -0
  33. package/src/llama.cpp/common/ngram-cache.h +94 -0
  34. package/src/llama.cpp/common/sampling.cpp +353 -0
  35. package/src/llama.cpp/common/sampling.h +147 -0
  36. package/src/llama.cpp/common/stb_image.h +8396 -0
  37. package/src/llama.cpp/common/train.cpp +1513 -0
  38. package/src/llama.cpp/common/train.h +233 -0
  39. package/src/llama.cpp/examples/CMakeLists.txt +52 -0
  40. package/src/llama.cpp/examples/baby-llama/CMakeLists.txt +5 -0
  41. package/src/llama.cpp/examples/baby-llama/baby-llama.cpp +1640 -0
  42. package/src/llama.cpp/examples/batched/CMakeLists.txt +5 -0
  43. package/src/llama.cpp/examples/batched/batched.cpp +262 -0
  44. package/src/llama.cpp/examples/batched-bench/CMakeLists.txt +5 -0
  45. package/src/llama.cpp/examples/batched-bench/batched-bench.cpp +261 -0
  46. package/src/llama.cpp/examples/beam-search/CMakeLists.txt +5 -0
  47. package/src/llama.cpp/examples/beam-search/beam-search.cpp +188 -0
  48. package/src/llama.cpp/examples/benchmark/CMakeLists.txt +6 -0
  49. package/src/llama.cpp/examples/benchmark/benchmark-matmult.cpp +275 -0
  50. package/src/llama.cpp/examples/convert-llama2c-to-ggml/CMakeLists.txt +5 -0
  51. package/src/llama.cpp/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +936 -0
  52. package/src/llama.cpp/examples/embedding/CMakeLists.txt +5 -0
  53. package/src/llama.cpp/examples/embedding/embedding.cpp +211 -0
  54. package/src/llama.cpp/examples/eval-callback/CMakeLists.txt +9 -0
  55. package/src/llama.cpp/examples/eval-callback/eval-callback.cpp +195 -0
  56. package/src/llama.cpp/examples/export-lora/CMakeLists.txt +5 -0
  57. package/src/llama.cpp/examples/export-lora/export-lora.cpp +462 -0
  58. package/src/llama.cpp/examples/finetune/CMakeLists.txt +5 -0
  59. package/src/llama.cpp/examples/finetune/finetune.cpp +1861 -0
  60. package/src/llama.cpp/examples/gbnf-validator/CMakeLists.txt +5 -0
  61. package/src/llama.cpp/examples/gbnf-validator/gbnf-validator.cpp +132 -0
  62. package/src/llama.cpp/examples/gguf/CMakeLists.txt +5 -0
  63. package/src/llama.cpp/examples/gguf/gguf.cpp +256 -0
  64. package/src/llama.cpp/examples/gguf-split/CMakeLists.txt +5 -0
  65. package/src/llama.cpp/examples/gguf-split/gguf-split.cpp +553 -0
  66. package/src/llama.cpp/examples/gritlm/CMakeLists.txt +5 -0
  67. package/src/llama.cpp/examples/gritlm/gritlm.cpp +215 -0
  68. package/src/llama.cpp/examples/imatrix/CMakeLists.txt +5 -0
  69. package/src/llama.cpp/examples/imatrix/imatrix.cpp +655 -0
  70. package/src/llama.cpp/examples/infill/CMakeLists.txt +5 -0
  71. package/src/llama.cpp/examples/infill/infill.cpp +767 -0
  72. package/src/llama.cpp/examples/jeopardy/questions.txt +100 -0
  73. package/src/llama.cpp/examples/llama-bench/CMakeLists.txt +5 -0
  74. package/src/llama.cpp/examples/llama-bench/llama-bench.cpp +1286 -0
  75. package/src/llama.cpp/examples/llama.android/app/src/main/cpp/CMakeLists.txt +50 -0
  76. package/src/llama.cpp/examples/llama.android/app/src/main/cpp/llama-android.cpp +443 -0
  77. package/src/llama.cpp/examples/llava/CMakeLists.txt +37 -0
  78. package/src/llama.cpp/examples/llava/clip.cpp +2027 -0
  79. package/src/llama.cpp/examples/llava/clip.h +85 -0
  80. package/src/llama.cpp/examples/llava/llava-cli.cpp +309 -0
  81. package/src/llama.cpp/examples/llava/llava.cpp +426 -0
  82. package/src/llama.cpp/examples/llava/llava.h +50 -0
  83. package/src/llama.cpp/examples/llava/requirements.txt +3 -0
  84. package/src/llama.cpp/examples/lookahead/CMakeLists.txt +5 -0
  85. package/src/llama.cpp/examples/lookahead/lookahead.cpp +485 -0
  86. package/src/llama.cpp/examples/lookup/CMakeLists.txt +23 -0
  87. package/src/llama.cpp/examples/lookup/lookup-create.cpp +41 -0
  88. package/src/llama.cpp/examples/lookup/lookup-merge.cpp +47 -0
  89. package/src/llama.cpp/examples/lookup/lookup-stats.cpp +160 -0
  90. package/src/llama.cpp/examples/lookup/lookup.cpp +258 -0
  91. package/src/llama.cpp/examples/main/CMakeLists.txt +5 -0
  92. package/src/llama.cpp/examples/main/main.cpp +957 -0
  93. package/src/llama.cpp/examples/main-cmake-pkg/CMakeLists.txt +33 -0
  94. package/src/llama.cpp/examples/parallel/CMakeLists.txt +5 -0
  95. package/src/llama.cpp/examples/parallel/parallel.cpp +427 -0
  96. package/src/llama.cpp/examples/passkey/CMakeLists.txt +5 -0
  97. package/src/llama.cpp/examples/passkey/passkey.cpp +302 -0
  98. package/src/llama.cpp/examples/perplexity/CMakeLists.txt +5 -0
  99. package/src/llama.cpp/examples/perplexity/perplexity.cpp +1943 -0
  100. package/src/llama.cpp/examples/quantize/CMakeLists.txt +6 -0
  101. package/src/llama.cpp/examples/quantize/quantize.cpp +423 -0
  102. package/src/llama.cpp/examples/quantize-stats/CMakeLists.txt +6 -0
  103. package/src/llama.cpp/examples/quantize-stats/quantize-stats.cpp +424 -0
  104. package/src/llama.cpp/examples/retrieval/CMakeLists.txt +5 -0
  105. package/src/llama.cpp/examples/retrieval/retrieval.cpp +350 -0
  106. package/src/llama.cpp/examples/save-load-state/CMakeLists.txt +5 -0
  107. package/src/llama.cpp/examples/save-load-state/save-load-state.cpp +246 -0
  108. package/src/llama.cpp/examples/server/CMakeLists.txt +40 -0
  109. package/src/llama.cpp/examples/server/bench/requirements.txt +2 -0
  110. package/src/llama.cpp/examples/server/httplib.h +9465 -0
  111. package/src/llama.cpp/examples/server/server.cpp +3826 -0
  112. package/src/llama.cpp/examples/server/tests/requirements.txt +6 -0
  113. package/src/llama.cpp/examples/server/utils.hpp +653 -0
  114. package/src/llama.cpp/examples/simple/CMakeLists.txt +5 -0
  115. package/src/llama.cpp/examples/simple/simple.cpp +183 -0
  116. package/src/llama.cpp/examples/speculative/CMakeLists.txt +5 -0
  117. package/src/llama.cpp/examples/speculative/speculative.cpp +614 -0
  118. package/src/llama.cpp/examples/sycl/CMakeLists.txt +9 -0
  119. package/src/llama.cpp/examples/sycl/ls-sycl-device.cpp +13 -0
  120. package/src/llama.cpp/examples/tokenize/CMakeLists.txt +5 -0
  121. package/src/llama.cpp/examples/tokenize/tokenize.cpp +42 -0
  122. package/src/llama.cpp/examples/train-text-from-scratch/CMakeLists.txt +5 -0
  123. package/src/llama.cpp/examples/train-text-from-scratch/train-text-from-scratch.cpp +1252 -0
  124. package/src/llama.cpp/ggml-alloc.c +985 -0
  125. package/src/llama.cpp/ggml-alloc.h +76 -0
  126. package/src/llama.cpp/ggml-backend-impl.h +141 -0
  127. package/src/llama.cpp/ggml-backend.c +2099 -0
  128. package/src/llama.cpp/ggml-backend.h +233 -0
  129. package/src/llama.cpp/ggml-common.h +1853 -0
  130. package/src/llama.cpp/ggml-cuda.h +43 -0
  131. package/src/llama.cpp/ggml-impl.h +265 -0
  132. package/src/llama.cpp/ggml-kompute.cpp +2006 -0
  133. package/src/llama.cpp/ggml-kompute.h +46 -0
  134. package/src/llama.cpp/ggml-metal.h +66 -0
  135. package/src/llama.cpp/ggml-mpi.c +216 -0
  136. package/src/llama.cpp/ggml-mpi.h +39 -0
  137. package/src/llama.cpp/ggml-opencl.cpp +2301 -0
  138. package/src/llama.cpp/ggml-opencl.h +36 -0
  139. package/src/llama.cpp/ggml-quants.c +12678 -0
  140. package/src/llama.cpp/ggml-quants.h +133 -0
  141. package/src/llama.cpp/ggml-sycl.cpp +17882 -0
  142. package/src/llama.cpp/ggml-sycl.h +49 -0
  143. package/src/llama.cpp/ggml-vulkan-shaders.hpp +69849 -0
  144. package/src/llama.cpp/ggml-vulkan.cpp +6442 -0
  145. package/src/llama.cpp/ggml-vulkan.h +29 -0
  146. package/src/llama.cpp/ggml.c +21819 -0
  147. package/src/llama.cpp/ggml.h +2403 -0
  148. package/src/llama.cpp/llama.cpp +17468 -0
  149. package/src/llama.cpp/llama.h +1117 -0
  150. package/src/llama.cpp/pocs/CMakeLists.txt +12 -0
  151. package/src/llama.cpp/pocs/vdot/CMakeLists.txt +9 -0
  152. package/src/llama.cpp/pocs/vdot/q8dot.cpp +172 -0
  153. package/src/llama.cpp/pocs/vdot/vdot.cpp +310 -0
  154. package/src/llama.cpp/prompts/LLM-questions.txt +49 -0
  155. package/src/llama.cpp/prompts/alpaca.txt +1 -0
  156. package/src/llama.cpp/prompts/assistant.txt +31 -0
  157. package/src/llama.cpp/prompts/chat-with-baichuan.txt +4 -0
  158. package/src/llama.cpp/prompts/chat-with-bob.txt +7 -0
  159. package/src/llama.cpp/prompts/chat-with-qwen.txt +1 -0
  160. package/src/llama.cpp/prompts/chat-with-vicuna-v0.txt +7 -0
  161. package/src/llama.cpp/prompts/chat-with-vicuna-v1.txt +7 -0
  162. package/src/llama.cpp/prompts/chat.txt +28 -0
  163. package/src/llama.cpp/prompts/dan-modified.txt +1 -0
  164. package/src/llama.cpp/prompts/dan.txt +1 -0
  165. package/src/llama.cpp/prompts/mnemonics.txt +93 -0
  166. package/src/llama.cpp/prompts/parallel-questions.txt +43 -0
  167. package/src/llama.cpp/prompts/reason-act.txt +18 -0
  168. package/src/llama.cpp/requirements/requirements-convert-hf-to-gguf.txt +3 -0
  169. package/src/llama.cpp/requirements/requirements-convert-llama-ggml-to-gguf.txt +1 -0
  170. package/src/llama.cpp/requirements/requirements-convert-lora-to-ggml.txt +2 -0
  171. package/src/llama.cpp/requirements/requirements-convert-persimmon-to-gguf.txt +2 -0
  172. package/src/llama.cpp/requirements/requirements-convert.txt +5 -0
  173. package/src/llama.cpp/requirements.txt +12 -0
  174. package/src/llama.cpp/scripts/gen-build-info-cpp.cmake +24 -0
  175. package/src/llama.cpp/scripts/xxd.cmake +16 -0
  176. package/src/llama.cpp/sgemm.cpp +999 -0
  177. package/src/llama.cpp/sgemm.h +12 -0
  178. package/src/llama.cpp/tests/CMakeLists.txt +78 -0
  179. package/src/llama.cpp/tests/get-model.cpp +21 -0
  180. package/src/llama.cpp/tests/get-model.h +2 -0
  181. package/src/llama.cpp/tests/test-autorelease.cpp +24 -0
  182. package/src/llama.cpp/tests/test-backend-ops.cpp +2266 -0
  183. package/src/llama.cpp/tests/test-c.c +7 -0
  184. package/src/llama.cpp/tests/test-chat-template.cpp +107 -0
  185. package/src/llama.cpp/tests/test-double-float.cpp +57 -0
  186. package/src/llama.cpp/tests/test-grad0.cpp +1606 -0
  187. package/src/llama.cpp/tests/test-grammar-integration.cpp +243 -0
  188. package/src/llama.cpp/tests/test-grammar-parser.cpp +250 -0
  189. package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +899 -0
  190. package/src/llama.cpp/tests/test-llama-grammar.cpp +402 -0
  191. package/src/llama.cpp/tests/test-model-load-cancel.cpp +27 -0
  192. package/src/llama.cpp/tests/test-opt.cpp +181 -0
  193. package/src/llama.cpp/tests/test-quantize-fns.cpp +185 -0
  194. package/src/llama.cpp/tests/test-quantize-perf.cpp +363 -0
  195. package/src/llama.cpp/tests/test-rope.cpp +221 -0
  196. package/src/llama.cpp/tests/test-sampling.cpp +301 -0
  197. package/src/llama.cpp/tests/test-tokenizer-0-falcon.cpp +187 -0
  198. package/src/llama.cpp/tests/test-tokenizer-0-llama.cpp +190 -0
  199. package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp +123 -0
  200. package/src/llama.cpp/tests/test-tokenizer-1-llama.cpp +111 -0
  201. package/src/llama.cpp/unicode-data.cpp +1651 -0
  202. package/src/llama.cpp/unicode-data.h +16 -0
  203. package/src/llama.cpp/unicode.cpp +277 -0
  204. package/src/llama.cpp/unicode.h +28 -0
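
Note: the prebuilt native binaries listed above follow a bin/<platform>/<arch>/llama-node.node layout, with a loader in package/lib/binding.js. The loader source itself is not reproduced in this diff view, so the sketch below is only a hypothetical illustration (in TypeScript) of how such a layout is typically resolved at runtime; the function name resolveBindingPath and the packageRoot parameter are illustrative and not part of the package.

import { join } from 'node:path';

// Hypothetical sketch only: maps the current platform/arch onto the
// bin/<platform>/<arch>/llama-node.node layout listed above.
// process.platform is 'darwin' | 'linux' | 'win32'; process.arch is 'arm64' | 'x64'.
export function resolveBindingPath(packageRoot: string): string {
  return join(packageRoot, 'bin', process.platform, process.arch, 'llama-node.node');
}

// Usage (CommonJS): const addon = require(resolveBindingPath('/path/to/node_modules/@fugood/llama.node'));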
package/src/llama.cpp/examples/main/main.cpp
@@ -0,0 +1,957 @@
+ #include "common.h"
+
+ #include "console.h"
+ #include "llama.h"
+
+ #include <cassert>
+ #include <cinttypes>
+ #include <cmath>
+ #include <cstdio>
+ #include <cstring>
+ #include <ctime>
+ #include <fstream>
+ #include <iostream>
+ #include <sstream>
+ #include <string>
+ #include <vector>
+
+ #if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
+ #include <signal.h>
+ #include <unistd.h>
+ #elif defined (_WIN32)
+ #define WIN32_LEAN_AND_MEAN
+ #ifndef NOMINMAX
+ #define NOMINMAX
+ #endif
+ #include <windows.h>
+ #include <signal.h>
+ #endif
+
+ #if defined(_MSC_VER)
+ #pragma warning(disable: 4244 4267) // possible loss of data
+ #endif
+
+ static llama_context ** g_ctx;
+ static llama_model ** g_model;
+ static gpt_params * g_params;
+ static std::vector<llama_token> * g_input_tokens;
+ static std::ostringstream * g_output_ss;
+ static std::vector<llama_token> * g_output_tokens;
+ static bool is_interacting = false;
+
+ static bool file_exists(const std::string &path) {
+ std::ifstream f(path.c_str());
+ return f.good();
+ }
+
+ static bool file_is_empty(const std::string &path) {
+ std::ifstream f;
+ f.exceptions(std::ifstream::failbit | std::ifstream::badbit);
+ f.open(path.c_str(), std::ios::in | std::ios::binary | std::ios::ate);
+ return f.tellg() == 0;
+ }
+
+ static void write_logfile(
+ const llama_context * ctx, const gpt_params & params, const llama_model * model,
+ const std::vector<llama_token> & input_tokens, const std::string & output,
+ const std::vector<llama_token> & output_tokens
+ ) {
+ if (params.logdir.empty()) {
+ return;
+ }
+
+ const std::string timestamp = get_sortable_timestamp();
+
+ const bool success = create_directory_with_parents(params.logdir);
+ if (!success) {
+ fprintf(stderr, "%s: warning: failed to create logdir %s, cannot write logfile\n",
+ __func__, params.logdir.c_str());
+ return;
+ }
+
+ const std::string logfile_path = params.logdir + timestamp + ".yml";
+ FILE * logfile = fopen(logfile_path.c_str(), "w");
+
+ if (logfile == NULL) {
+ fprintf(stderr, "%s: failed to open logfile %s\n", __func__, logfile_path.c_str());
+ return;
+ }
+
+ fprintf(logfile, "binary: main\n");
+ char model_desc[128];
+ llama_model_desc(model, model_desc, sizeof(model_desc));
+ dump_non_result_info_yaml(logfile, params, ctx, timestamp, input_tokens, model_desc);
+
+ fprintf(logfile, "\n");
+ fprintf(logfile, "######################\n");
+ fprintf(logfile, "# Generation Results #\n");
+ fprintf(logfile, "######################\n");
+ fprintf(logfile, "\n");
+
+ dump_string_yaml_multiline(logfile, "output", output.c_str());
+ dump_vector_int_yaml(logfile, "output_tokens", output_tokens);
+
+ llama_dump_timing_info_yaml(logfile, ctx);
+ fclose(logfile);
+ }
+
+ #if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
+ static void sigint_handler(int signo) {
+ if (signo == SIGINT) {
+ if (!is_interacting && g_params->interactive) {
+ is_interacting = true;
+ } else {
+ console::cleanup();
+ printf("\n");
+ llama_print_timings(*g_ctx);
+ write_logfile(*g_ctx, *g_params, *g_model, *g_input_tokens, g_output_ss->str(), *g_output_tokens);
+ _exit(130);
+ }
+ }
+ }
+ #endif
+
+ static void llama_log_callback_logTee(ggml_log_level level, const char * text, void * user_data) {
+ (void) level;
+ (void) user_data;
+ LOG_TEE("%s", text);
+ }
+
+ int main(int argc, char ** argv) {
+ gpt_params params;
+ g_params = &params;
+
+ if (!gpt_params_parse(argc, argv, params)) {
+ return 1;
+ }
+ llama_sampling_params & sparams = params.sparams;
+
+ #ifndef LOG_DISABLE_LOGS
+ log_set_target(log_filename_generator("main", "log"));
+ LOG_TEE("Log start\n");
+ log_dump_cmdline(argc, argv);
+ llama_log_set(llama_log_callback_logTee, nullptr);
+ #endif // LOG_DISABLE_LOGS
+
+ // TODO: Dump params ?
+ //LOG("Params perplexity: %s\n", LOG_TOSTR(params.perplexity));
+
+ // save choice to use color for later
+ // (note for later: this is a slightly awkward choice)
+ console::init(params.simple_io, params.use_color);
+ atexit([]() { console::cleanup(); });
+
+ if (params.logits_all) {
+ printf("\n************\n");
+ printf("%s: please use the 'perplexity' tool for perplexity calculations\n", __func__);
+ printf("************\n\n");
+
+ return 0;
+ }
+
+ if (params.embedding) {
+ printf("\n************\n");
+ printf("%s: please use the 'embedding' tool for embedding calculations\n", __func__);
+ printf("************\n\n");
+
+ return 0;
+ }
+
+ if (params.n_ctx != 0 && params.n_ctx < 8) {
+ LOG_TEE("%s: warning: minimum context size is 8, using minimum size.\n", __func__);
+ params.n_ctx = 8;
+ }
+
+ if (params.rope_freq_base != 0.0) {
+ LOG_TEE("%s: warning: changing RoPE frequency base to %g.\n", __func__, params.rope_freq_base);
+ }
+
+ if (params.rope_freq_scale != 0.0) {
+ LOG_TEE("%s: warning: scaling RoPE frequency by %g.\n", __func__, params.rope_freq_scale);
+ }
+
+ LOG_TEE("%s: build = %d (%s)\n", __func__, LLAMA_BUILD_NUMBER, LLAMA_COMMIT);
+ LOG_TEE("%s: built with %s for %s\n", __func__, LLAMA_COMPILER, LLAMA_BUILD_TARGET);
+
+ if (params.seed == LLAMA_DEFAULT_SEED) {
+ params.seed = time(NULL);
+ }
+
+ LOG_TEE("%s: seed = %u\n", __func__, params.seed);
+
+ std::mt19937 rng(params.seed);
+ if (params.random_prompt) {
+ params.prompt = gpt_random_prompt(rng);
+ }
+
+ LOG("%s: llama backend init\n", __func__);
+ llama_backend_init();
+ llama_numa_init(params.numa);
+
+ llama_model * model;
+ llama_context * ctx;
+ llama_context * ctx_guidance = NULL;
+ g_model = &model;
+ g_ctx = &ctx;
+
+ // load the model and apply lora adapter, if any
+ LOG("%s: load the model and apply lora adapter, if any\n", __func__);
+ std::tie(model, ctx) = llama_init_from_gpt_params(params);
+ if (sparams.cfg_scale > 1.f) {
+ struct llama_context_params lparams = llama_context_params_from_gpt_params(params);
+ ctx_guidance = llama_new_context_with_model(model, lparams);
+ }
+
+ if (model == NULL) {
+ LOG_TEE("%s: error: unable to load model\n", __func__);
+ return 1;
+ }
+
+ const int n_ctx_train = llama_n_ctx_train(model);
+ const int n_ctx = llama_n_ctx(ctx);
+ LOG("n_ctx: %d\n", n_ctx);
+
+ if (n_ctx > n_ctx_train) {
+ LOG_TEE("%s: warning: model was trained on only %d context tokens (%d specified)\n",
+ __func__, n_ctx_train, n_ctx);
+ }
+
+ // print system information
+ {
+ LOG_TEE("\n");
+ LOG_TEE("%s\n", get_system_info(params).c_str());
+ }
+
+ std::string path_session = params.path_prompt_cache;
+ std::vector<llama_token> session_tokens;
+
+ if (!path_session.empty()) {
+ LOG_TEE("%s: attempting to load saved session from '%s'\n", __func__, path_session.c_str());
+ if (!file_exists(path_session)) {
+ LOG_TEE("%s: session file does not exist, will create.\n", __func__);
+ } else if (file_is_empty(path_session)) {
+ LOG_TEE("%s: The session file is empty. A new session will be initialized.\n", __func__);
+ } else {
+ // The file exists and is not empty
+ session_tokens.resize(n_ctx);
+ size_t n_token_count_out = 0;
+ if (!llama_state_load_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.capacity(), &n_token_count_out)) {
+ LOG_TEE("%s: error: failed to load session file '%s'\n", __func__, path_session.c_str());
+ return 1;
+ }
+ session_tokens.resize(n_token_count_out);
+ llama_set_rng_seed(ctx, params.seed);
+ LOG_TEE("%s: loaded a session with prompt size of %d tokens\n", __func__, (int)session_tokens.size());
+ }
+ }
+
+ const bool add_bos = llama_should_add_bos_token(model);
+ GGML_ASSERT(llama_add_eos_token(model) != 1);
+ LOG("add_bos: %d\n", add_bos);
+
+ std::vector<llama_token> embd_inp;
+
+ if (params.interactive_first || params.instruct || params.chatml || !params.prompt.empty() || session_tokens.empty()) {
+ LOG("tokenize the prompt\n");
+ if (params.chatml) {
+ params.prompt = "<|im_start|>system\n" + params.prompt + "<|im_end|>";
+ }
+ embd_inp = ::llama_tokenize(ctx, params.prompt, true, true);
+ } else {
+ LOG("use session tokens\n");
+ embd_inp = session_tokens;
+ }
+
+ LOG("prompt: \"%s\"\n", log_tostr(params.prompt));
+ LOG("tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp).c_str());
+
+ // Should not run without any tokens
+ if (embd_inp.empty()) {
+ embd_inp.push_back(llama_token_bos(model));
+ LOG("embd_inp was considered empty and bos was added: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp).c_str());
+ }
+
+ // Tokenize negative prompt
+ std::vector<llama_token> guidance_inp;
+ int guidance_offset = 0;
+ int original_prompt_len = 0;
+ if (ctx_guidance) {
+ LOG("cfg_negative_prompt: \"%s\"\n", log_tostr(sparams.cfg_negative_prompt));
+
+ guidance_inp = ::llama_tokenize(ctx_guidance, sparams.cfg_negative_prompt, true, true);
+ LOG("guidance_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx_guidance, guidance_inp).c_str());
+
+ std::vector<llama_token> original_inp = ::llama_tokenize(ctx, params.prompt, true, true);
+ LOG("original_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, original_inp).c_str());
+
+ original_prompt_len = original_inp.size();
+ guidance_offset = (int)guidance_inp.size() - original_prompt_len;
+ LOG("original_prompt_len: %s", log_tostr(original_prompt_len));
+ LOG("guidance_offset: %s", log_tostr(guidance_offset));
+ }
+
+ if ((int) embd_inp.size() > n_ctx - 4) {
+ LOG_TEE("%s: error: prompt is too long (%d tokens, max %d)\n", __func__, (int) embd_inp.size(), n_ctx - 4);
+ return 1;
+ }
+
+ // debug message about similarity of saved session, if applicable
+ size_t n_matching_session_tokens = 0;
+ if (!session_tokens.empty()) {
+ for (llama_token id : session_tokens) {
+ if (n_matching_session_tokens >= embd_inp.size() || id != embd_inp[n_matching_session_tokens]) {
+ break;
+ }
+ n_matching_session_tokens++;
+ }
+ if (params.prompt.empty() && n_matching_session_tokens == embd_inp.size()) {
+ LOG_TEE("%s: using full prompt from session file\n", __func__);
+ } else if (n_matching_session_tokens >= embd_inp.size()) {
+ LOG_TEE("%s: session file has exact match for prompt!\n", __func__);
+ } else if (n_matching_session_tokens < (embd_inp.size() / 2)) {
+ LOG_TEE("%s: warning: session file has low similarity to prompt (%zu / %zu tokens); will mostly be reevaluated\n",
+ __func__, n_matching_session_tokens, embd_inp.size());
+ } else {
+ LOG_TEE("%s: session file matches %zu / %zu tokens of prompt\n",
+ __func__, n_matching_session_tokens, embd_inp.size());
+ }
+
+ // remove any "future" tokens that we might have inherited from the previous session
+ llama_kv_cache_seq_rm(ctx, -1, n_matching_session_tokens, -1);
+ }
+
+ LOGLN(
+ "recalculate the cached logits (check): embd_inp.empty() %s, n_matching_session_tokens %zu, embd_inp.size() %zu, session_tokens.size() %zu, embd_inp.size() %zu",
+ log_tostr(embd_inp.empty()), n_matching_session_tokens, embd_inp.size(), session_tokens.size(), embd_inp.size());
+
+ // if we will use the cache for the full prompt without reaching the end of the cache, force
+ // reevaluation of the last token token to recalculate the cached logits
+ if (!embd_inp.empty() && n_matching_session_tokens == embd_inp.size() && session_tokens.size() > embd_inp.size()) {
+ LOGLN("recalculate the cached logits (do): session_tokens.resize( %zu )", embd_inp.size() - 1);
+
+ session_tokens.resize(embd_inp.size() - 1);
+ }
+
+ // number of tokens to keep when resetting context
+ if (params.n_keep < 0 || params.n_keep > (int) embd_inp.size() || params.instruct || params.chatml) {
+ params.n_keep = (int)embd_inp.size();
+ } else {
+ params.n_keep += add_bos; // always keep the BOS token
+ }
+
+ // prefix & suffix for instruct mode
+ const auto inp_pfx = ::llama_tokenize(ctx, "\n\n### Instruction:\n\n", true, true);
+ const auto inp_sfx = ::llama_tokenize(ctx, "\n\n### Response:\n\n", false, true);
+
+ LOG("inp_pfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, inp_pfx).c_str());
+ LOG("inp_sfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, inp_sfx).c_str());
+
+ // chatml prefix & suffix
+ const auto cml_pfx = ::llama_tokenize(ctx, "\n<|im_start|>user\n", true, true);
+ const auto cml_sfx = ::llama_tokenize(ctx, "<|im_end|>\n<|im_start|>assistant\n", false, true);
+
+ LOG("cml_pfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, cml_pfx).c_str());
+ LOG("cml_sfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, cml_sfx).c_str());
+
+ // in instruct mode, we inject a prefix and a suffix to each input by the user
+ if (params.instruct) {
+ params.interactive_first = true;
+ params.antiprompt.emplace_back("### Instruction:\n\n");
+ }
+ // similar for chatml mode
+ else if (params.chatml) {
+ params.interactive_first = true;
+ params.antiprompt.emplace_back("<|im_start|>user\n");
+ }
+
+ // enable interactive mode if interactive start is specified
+ if (params.interactive_first) {
+ params.interactive = true;
+ }
+
+ if (params.verbose_prompt) {
+ LOG_TEE("\n");
+ LOG_TEE("%s: prompt: '%s'\n", __func__, params.prompt.c_str());
+ LOG_TEE("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
+ for (int i = 0; i < (int) embd_inp.size(); i++) {
+ LOG_TEE("%6d -> '%s'\n", embd_inp[i], llama_token_to_piece(ctx, embd_inp[i]).c_str());
+ }
+
+ if (ctx_guidance) {
+ LOG_TEE("\n");
+ LOG_TEE("%s: negative prompt: '%s'\n", __func__, sparams.cfg_negative_prompt.c_str());
+ LOG_TEE("%s: number of tokens in negative prompt = %zu\n", __func__, guidance_inp.size());
+ for (int i = 0; i < (int) guidance_inp.size(); i++) {
+ LOG_TEE("%6d -> '%s'\n", guidance_inp[i], llama_token_to_piece(ctx, guidance_inp[i]).c_str());
+ }
+ }
+
+ if (params.n_keep > add_bos) {
+ LOG_TEE("%s: static prompt based on n_keep: '", __func__);
+ for (int i = 0; i < params.n_keep; i++) {
+ LOG_TEE("%s", llama_token_to_piece(ctx, embd_inp[i]).c_str());
+ }
+ LOG_TEE("'\n");
+ }
+ LOG_TEE("\n");
+ }
+
+ // ctrl+C handling
+ {
+ #if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
+ struct sigaction sigint_action;
+ sigint_action.sa_handler = sigint_handler;
+ sigemptyset (&sigint_action.sa_mask);
+ sigint_action.sa_flags = 0;
+ sigaction(SIGINT, &sigint_action, NULL);
+ #elif defined (_WIN32)
+ auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
+ return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
+ };
+ SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
+ #endif
+ }
+
+ if (params.interactive) {
+ LOG_TEE("%s: interactive mode on.\n", __func__);
+
+ if (!params.antiprompt.empty()) {
+ for (const auto & antiprompt : params.antiprompt) {
+ LOG_TEE("Reverse prompt: '%s'\n", antiprompt.c_str());
+ if (params.verbose_prompt) {
+ auto tmp = ::llama_tokenize(ctx, antiprompt, false, true);
+ for (int i = 0; i < (int) tmp.size(); i++) {
+ LOG_TEE("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
+ }
+ }
+ }
+ }
+
+ if (params.input_prefix_bos) {
+ LOG_TEE("Input prefix with BOS\n");
+ }
+
+ if (!params.input_prefix.empty()) {
+ LOG_TEE("Input prefix: '%s'\n", params.input_prefix.c_str());
+ if (params.verbose_prompt) {
+ auto tmp = ::llama_tokenize(ctx, params.input_prefix, true, true);
+ for (int i = 0; i < (int) tmp.size(); i++) {
+ LOG_TEE("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
+ }
+ }
+ }
+
+ if (!params.input_suffix.empty()) {
+ LOG_TEE("Input suffix: '%s'\n", params.input_suffix.c_str());
+ if (params.verbose_prompt) {
+ auto tmp = ::llama_tokenize(ctx, params.input_suffix, false, true);
+ for (int i = 0; i < (int) tmp.size(); i++) {
+ LOG_TEE("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
+ }
+ }
+ }
+ }
+ LOG_TEE("sampling: \n%s\n", llama_sampling_print(sparams).c_str());
+ LOG_TEE("sampling order: \n%s\n", llama_sampling_order_print(sparams).c_str());
+ LOG_TEE("generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep);
+
+ // group-attention state
+ // number of grouped KV tokens so far (used only if params.grp_attn_n > 1)
+ int ga_i = 0;
+
+ const int ga_n = params.grp_attn_n;
+ const int ga_w = params.grp_attn_w;
+
+ if (ga_n != 1) {
+ GGML_ASSERT(ga_n > 0 && "grp_attn_n must be positive"); // NOLINT
+ GGML_ASSERT(ga_w % ga_n == 0 && "grp_attn_w must be a multiple of grp_attn_n"); // NOLINT
+ //GGML_ASSERT(n_ctx_train % ga_w == 0 && "n_ctx_train must be a multiple of grp_attn_w"); // NOLINT
+ //GGML_ASSERT(n_ctx >= n_ctx_train * ga_n && "n_ctx must be at least n_ctx_train * grp_attn_n"); // NOLINT
+ LOG_TEE("self-extend: n_ctx_train = %d, grp_attn_n = %d, grp_attn_w = %d\n", n_ctx_train, ga_n, ga_w);
+ }
+ LOG_TEE("\n\n");
+
+ if (params.interactive) {
+ const char *control_message;
+ if (params.multiline_input) {
+ control_message = " - To return control to LLaMa, end your input with '\\'.\n"
+ " - To return control without starting a new line, end your input with '/'.\n";
+ } else {
+ control_message = " - Press Return to return control to LLaMa.\n"
+ " - To return control without starting a new line, end your input with '/'.\n"
+ " - If you want to submit another line, end your input with '\\'.\n";
+ }
+ LOG_TEE("== Running in interactive mode. ==\n");
+ #if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
+ LOG_TEE( " - Press Ctrl+C to interject at any time.\n");
+ #endif
+ LOG_TEE( "%s\n", control_message);
+
+ is_interacting = params.interactive_first;
+ }
+
+ bool is_antiprompt = false;
+ bool input_echo = true;
+ bool display = true;
+ bool need_to_save_session = !path_session.empty() && n_matching_session_tokens < embd_inp.size();
+
+ int n_past = 0;
+ int n_remain = params.n_predict;
+ int n_consumed = 0;
+ int n_session_consumed = 0;
+ int n_past_guidance = 0;
+
+ std::vector<int> input_tokens; g_input_tokens = &input_tokens;
+ std::vector<int> output_tokens; g_output_tokens = &output_tokens;
+ std::ostringstream output_ss; g_output_ss = &output_ss;
+
+ // the first thing we will do is to output the prompt, so set color accordingly
+ console::set_display(console::prompt);
+ display = params.display_prompt;
+
+ std::vector<llama_token> embd;
+ std::vector<llama_token> embd_guidance;
+
+ // tokenized antiprompts
+ std::vector<std::vector<llama_token>> antiprompt_ids;
+
+ antiprompt_ids.reserve(params.antiprompt.size());
+ for (const std::string & antiprompt : params.antiprompt) {
+ antiprompt_ids.emplace_back(::llama_tokenize(ctx, antiprompt, false, true));
+ }
+
+ struct llama_sampling_context * ctx_sampling = llama_sampling_init(sparams);
+
+ while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
+ // predict
+ if (!embd.empty()) {
+ // Note: (n_ctx - 4) here is to match the logic for commandline prompt handling via
+ // --prompt or --file which uses the same value.
+ int max_embd_size = n_ctx - 4;
+
+ // Ensure the input doesn't exceed the context size by truncating embd if necessary.
+ if ((int) embd.size() > max_embd_size) {
+ const int skipped_tokens = (int) embd.size() - max_embd_size;
+ embd.resize(max_embd_size);
+
+ console::set_display(console::error);
+ printf("<<input too long: skipped %d token%s>>", skipped_tokens, skipped_tokens != 1 ? "s" : "");
+ console::set_display(console::reset);
+ fflush(stdout);
+ }
+
+ if (ga_n == 1) {
+ // infinite text generation via context shifting
+ // if we run out of context:
+ // - take the n_keep first tokens from the original prompt (via n_past)
+ // - take half of the last (n_ctx - n_keep) tokens and recompute the logits in batches
+ if (n_past + (int) embd.size() + std::max<int>(0, guidance_offset) > n_ctx) {
+ if (params.n_predict == -2) {
+ LOG_TEE("\n\n%s: context full and n_predict == -%d => stopping\n", __func__, params.n_predict);
+ break;
+ }
+
+ const int n_left = n_past - params.n_keep;
+ const int n_discard = n_left/2;
+
+ LOG("context full, swapping: n_past = %d, n_left = %d, n_ctx = %d, n_keep = %d, n_discard = %d\n",
+ n_past, n_left, n_ctx, params.n_keep, n_discard);
+
+ llama_kv_cache_seq_rm (ctx, 0, params.n_keep , params.n_keep + n_discard);
+ llama_kv_cache_seq_add(ctx, 0, params.n_keep + n_discard, n_past, -n_discard);
+
+ n_past -= n_discard;
+
+ if (ctx_guidance) {
+ n_past_guidance -= n_discard;
+ }
+
+ LOG("after swap: n_past = %d, n_past_guidance = %d\n", n_past, n_past_guidance);
+
+ LOG("embd: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd).c_str());
+
+ LOG("clear session path\n");
+ path_session.clear();
+ }
+ } else {
+ // context extension via Self-Extend
+ while (n_past >= ga_i + ga_w) {
+ const int ib = (ga_n*ga_i)/ga_w;
+ const int bd = (ga_w/ga_n)*(ga_n - 1);
+ const int dd = (ga_w/ga_n) - ib*bd - ga_w;
+
+ LOG("\n");
+ LOG("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", ga_i, n_past, ib*bd, ga_i + ib*bd, n_past + ib*bd);
+ LOG("div: [%6d, %6d] / %6d -> [%6d, %6d]\n", ga_i + ib*bd, ga_i + ib*bd + ga_w, ga_n, (ga_i + ib*bd)/ga_n, (ga_i + ib*bd + ga_w)/ga_n);
+ LOG("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", ga_i + ib*bd + ga_w, n_past + ib*bd, dd, ga_i + ib*bd + ga_w + dd, n_past + ib*bd + dd);
+
+ llama_kv_cache_seq_add(ctx, 0, ga_i, n_past, ib*bd);
+ llama_kv_cache_seq_div(ctx, 0, ga_i + ib*bd, ga_i + ib*bd + ga_w, ga_n);
+ llama_kv_cache_seq_add(ctx, 0, ga_i + ib*bd + ga_w, n_past + ib*bd, dd);
+
+ n_past -= bd;
+
+ ga_i += ga_w/ga_n;
+
+ LOG("\nn_past_old = %d, n_past = %d, ga_i = %d\n\n", n_past + bd, n_past, ga_i);
+ }
+ }
+
+ // try to reuse a matching prefix from the loaded session instead of re-eval (via n_past)
+ if (n_session_consumed < (int) session_tokens.size()) {
+ size_t i = 0;
+ for ( ; i < embd.size(); i++) {
+ if (embd[i] != session_tokens[n_session_consumed]) {
+ session_tokens.resize(n_session_consumed);
+ break;
+ }
+
+ n_past++;
+ n_session_consumed++;
+
+ if (n_session_consumed >= (int) session_tokens.size()) {
+ ++i;
+ break;
+ }
+ }
+ if (i > 0) {
+ embd.erase(embd.begin(), embd.begin() + i);
+ }
+ }
+
+ // evaluate tokens in batches
+ // embd is typically prepared beforehand to fit within a batch, but not always
+ if (ctx_guidance) {
+ int input_size = 0;
+ llama_token * input_buf = NULL;
+
+ if (n_past_guidance < (int) guidance_inp.size()) {
+ // Guidance context should have the same data with these modifications:
+ //
+ // * Replace the initial prompt
+ // * Shift everything by guidance_offset
+ embd_guidance = guidance_inp;
+ if (embd.begin() + original_prompt_len < embd.end()) {
+ embd_guidance.insert(
+ embd_guidance.end(),
+ embd.begin() + original_prompt_len,
+ embd.end()
+ );
+ }
+
+ input_buf = embd_guidance.data();
+ input_size = embd_guidance.size();
+
+ LOG("guidance context: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_guidance).c_str());
+ } else {
+ input_buf = embd.data();
+ input_size = embd.size();
+ }
+
+ for (int i = 0; i < input_size; i += params.n_batch) {
+ int n_eval = std::min(input_size - i, params.n_batch);
+ if (llama_decode(ctx_guidance, llama_batch_get_one(input_buf + i, n_eval, n_past_guidance, 0))) {
+ LOG_TEE("%s : failed to eval\n", __func__);
+ return 1;
+ }
+
+ n_past_guidance += n_eval;
+ }
+ }
+
+ for (int i = 0; i < (int) embd.size(); i += params.n_batch) {
+ int n_eval = (int) embd.size() - i;
+ if (n_eval > params.n_batch) {
+ n_eval = params.n_batch;
+ }
+
+ LOG("eval: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd).c_str());
+
+ if (llama_decode(ctx, llama_batch_get_one(&embd[i], n_eval, n_past, 0))) {
+ LOG_TEE("%s : failed to eval\n", __func__);
+ return 1;
+ }
+
+ n_past += n_eval;
+
+ LOG("n_past = %d\n", n_past);
+ // Display total tokens alongside total time
+ if (params.n_print > 0 && n_past % params.n_print == 0) {
+ LOG_TEE("\n\033[31mTokens consumed so far = %d / %d \033[0m\n", n_past, n_ctx);
+ }
+ }
+
+ if (!embd.empty() && !path_session.empty()) {
+ session_tokens.insert(session_tokens.end(), embd.begin(), embd.end());
+ n_session_consumed = session_tokens.size();
+ }
+ }
+
+ embd.clear();
+ embd_guidance.clear();
+
+ if ((int) embd_inp.size() <= n_consumed && !is_interacting) {
+ // optionally save the session on first sample (for faster prompt loading next time)
+ if (!path_session.empty() && need_to_save_session && !params.prompt_cache_ro) {
+ need_to_save_session = false;
+ llama_state_save_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size());
+
+ LOG("saved session to %s\n", path_session.c_str());
+ }
+
+ const llama_token id = llama_sampling_sample(ctx_sampling, ctx, ctx_guidance);
+
+ llama_sampling_accept(ctx_sampling, ctx, id, true);
+
+ LOG("last: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, ctx_sampling->prev).c_str());
+
+ embd.push_back(id);
+
+ // echo this to console
+ input_echo = true;
+
+ // decrement remaining sampling budget
+ --n_remain;
+
+ LOG("n_remain: %d\n", n_remain);
+ } else {
+ // some user input remains from prompt or interaction, forward it to processing
+ LOG("embd_inp.size(): %d, n_consumed: %d\n", (int) embd_inp.size(), n_consumed);
+ while ((int) embd_inp.size() > n_consumed) {
+ embd.push_back(embd_inp[n_consumed]);
+
+ // push the prompt in the sampling context in order to apply repetition penalties later
+ // for the prompt, we don't apply grammar rules
+ llama_sampling_accept(ctx_sampling, ctx, embd_inp[n_consumed], false);
+
+ ++n_consumed;
+ if ((int) embd.size() >= params.n_batch) {
+ break;
+ }
+ }
+ }
+
+ // display text
+ if (input_echo && display) {
+ for (auto id : embd) {
+ const std::string token_str = llama_token_to_piece(ctx, id);
+ printf("%s", token_str.c_str());
+
+ if (embd.size() > 1) {
+ input_tokens.push_back(id);
+ } else {
+ output_tokens.push_back(id);
+ output_ss << token_str;
+ }
+ }
+ fflush(stdout);
+ }
+ // reset color to default if there is no pending user input
+ if (input_echo && (int) embd_inp.size() == n_consumed) {
+ console::set_display(console::reset);
+ display = true;
+ }
+
+ // if not currently processing queued inputs;
+ if ((int) embd_inp.size() <= n_consumed) {
+ // check for reverse prompt in the last n_prev tokens
+ if (!params.antiprompt.empty()) {
+ const int n_prev = 32;
+ const std::string last_output = llama_sampling_prev_str(ctx_sampling, ctx, n_prev);
+
+ is_antiprompt = false;
+ // Check if each of the reverse prompts appears at the end of the output.
+ // If we're not running interactively, the reverse prompt might be tokenized with some following characters
+ // so we'll compensate for that by widening the search window a bit.
+ for (std::string & antiprompt : params.antiprompt) {
+ size_t extra_padding = params.interactive ? 0 : 2;
+ size_t search_start_pos = last_output.length() > static_cast<size_t>(antiprompt.length() + extra_padding)
+ ? last_output.length() - static_cast<size_t>(antiprompt.length() + extra_padding)
+ : 0;
+
+ if (last_output.find(antiprompt, search_start_pos) != std::string::npos) {
+ if (params.interactive) {
+ is_interacting = true;
+ }
+ is_antiprompt = true;
+ break;
+ }
+ }
+
+ // check for reverse prompt using special tokens
+ llama_token last_token = llama_sampling_last(ctx_sampling);
+ for (std::vector<llama_token> ids : antiprompt_ids) {
+ if (ids.size() == 1 && last_token == ids[0]) {
+ if (params.interactive) {
+ is_interacting = true;
+ }
+ is_antiprompt = true;
+ break;
+ }
+ }
+
+ if (is_antiprompt) {
+ LOG("found antiprompt: %s\n", last_output.c_str());
+ }
+ }
+
+ // deal with end of generation tokens in interactive mode
+ if (llama_token_is_eog(model, llama_sampling_last(ctx_sampling))) {
+ LOG("found EOS token\n");
+
+ if (params.interactive) {
+ if (!params.antiprompt.empty()) {
+ // tokenize and inject first reverse prompt
+ const auto first_antiprompt = ::llama_tokenize(ctx, params.antiprompt.front(), false, true);
+ embd_inp.insert(embd_inp.end(), first_antiprompt.begin(), first_antiprompt.end());
+ is_antiprompt = true;
+ }
+
+ is_interacting = true;
+ printf("\n");
+ } else if (params.instruct || params.chatml) {
+ is_interacting = true;
+ }
+ }
+
+ if (n_past > 0 && is_interacting) {
+ LOG("waiting for user input\n");
+
+ if (params.instruct || params.chatml) {
+ printf("\n> ");
+ }
+
+ if (params.input_prefix_bos) {
+ LOG("adding input prefix BOS token\n");
+ embd_inp.push_back(llama_token_bos(model));
+ }
+
+ std::string buffer;
+ if (!params.input_prefix.empty()) {
+ LOG("appending input prefix: '%s'\n", params.input_prefix.c_str());
+ printf("%s", params.input_prefix.c_str());
+ }
+
+ // color user input only
+ console::set_display(console::user_input);
+ display = params.display_prompt;
+
+ std::string line;
+ bool another_line = true;
+ do {
+ another_line = console::readline(line, params.multiline_input);
+ buffer += line;
+ } while (another_line);
+
+ // done taking input, reset color
+ console::set_display(console::reset);
+ display = true;
+
+ // Add tokens to embd only if the input buffer is non-empty
+ // Entering a empty line lets the user pass control back
+ if (buffer.length() > 1) {
+ // append input suffix if any
+ if (!params.input_suffix.empty()) {
+ LOG("appending input suffix: '%s'\n", params.input_suffix.c_str());
+ printf("%s", params.input_suffix.c_str());
+ }
+
+ LOG("buffer: '%s'\n", buffer.c_str());
+
+ const size_t original_size = embd_inp.size();
+
+ // instruct mode: insert instruction prefix
+ if (params.instruct && !is_antiprompt) {
+ LOG("inserting instruction prefix\n");
+ n_consumed = embd_inp.size();
+ embd_inp.insert(embd_inp.end(), inp_pfx.begin(), inp_pfx.end());
+ }
+ // chatml mode: insert user chat prefix
+ if (params.chatml && !is_antiprompt) {
+ LOG("inserting chatml prefix\n");
+ n_consumed = embd_inp.size();
+ embd_inp.insert(embd_inp.end(), cml_pfx.begin(), cml_pfx.end());
+ }
+ if (params.escape) {
+ process_escapes(buffer);
+ }
+
+ const auto line_pfx = ::llama_tokenize(ctx, params.input_prefix, false, true);
+ const auto line_inp = ::llama_tokenize(ctx, buffer, false, false);
+ const auto line_sfx = ::llama_tokenize(ctx, params.input_suffix, false, true);
+
+ LOG("input tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, line_inp).c_str());
+
+ embd_inp.insert(embd_inp.end(), line_pfx.begin(), line_pfx.end());
+ embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());
+ embd_inp.insert(embd_inp.end(), line_sfx.begin(), line_sfx.end());
+
+ // instruct mode: insert response suffix
+ if (params.instruct) {
+ LOG("inserting instruction suffix\n");
+ embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end());
+ }
+ // chatml mode: insert assistant chat suffix
+ if (params.chatml) {
+ LOG("inserting chatml suffix\n");
+ embd_inp.insert(embd_inp.end(), cml_sfx.begin(), cml_sfx.end());
+ }
+
+ for (size_t i = original_size; i < embd_inp.size(); ++i) {
+ const llama_token token = embd_inp[i];
+ output_tokens.push_back(token);
+ output_ss << llama_token_to_piece(ctx, token);
+ }
+
+ n_remain -= line_inp.size();
+ LOG("n_remain: %d\n", n_remain);
+ } else {
+ LOG("empty line, passing control back\n");
+ }
+
+ input_echo = false; // do not echo this again
+ }
+
+ if (n_past > 0) {
+ if (is_interacting) {
+ llama_sampling_reset(ctx_sampling);
+ }
+ is_interacting = false;
+ }
+ }
+
+ // end of generation
+ if (!embd.empty() && llama_token_is_eog(model, embd.back()) && !(params.instruct || params.interactive || params.chatml)) {
+ LOG_TEE(" [end of text]\n");
+ break;
+ }
+
+ // In interactive mode, respect the maximum number of tokens and drop back to user input when reached.
+ // We skip this logic when n_predict == -1 (infinite) or -2 (stop at context size).
+ if (params.interactive && n_remain <= 0 && params.n_predict >= 0) {
+ n_remain = params.n_predict;
+ is_interacting = true;
+ }
+ }
+
+ if (!path_session.empty() && params.prompt_cache_all && !params.prompt_cache_ro) {
+ LOG_TEE("\n%s: saving final output to session file '%s'\n", __func__, path_session.c_str());
+ llama_state_save_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size());
+ }
+
+ llama_print_timings(ctx);
+ write_logfile(ctx, params, model, input_tokens, output_ss.str(), output_tokens);
+
+ if (ctx_guidance) { llama_free(ctx_guidance); }
+ llama_free(ctx);
+ llama_free_model(model);
+
+ llama_sampling_free(ctx_sampling);
+ llama_backend_free();
+
+ #ifndef LOG_DISABLE_LOGS
+ LOG_TEE("Log end\n");
+ #endif // LOG_DISABLE_LOGS
+
+ return 0;
+ }
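
Note: the main.cpp diff above implements "infinite text generation via context shifting": when n_past plus the pending tokens would exceed n_ctx, it keeps the first n_keep tokens, discards half of the remaining KV cache, and shifts the tail back. The standalone sketch below (TypeScript, for consistency with the earlier sketch) only mirrors that arithmetic; the variable names follow the C++ and the function planContextShift is illustrative, not part of the package.

// Mirrors the context-shift arithmetic from main.cpp above (the ga_n == 1 branch):
//   llama_kv_cache_seq_rm (ctx, 0, n_keep, n_keep + n_discard);
//   llama_kv_cache_seq_add(ctx, 0, n_keep + n_discard, n_past, -n_discard);
export function planContextShift(nPast: number, nKeep: number) {
  const nLeft = nPast - nKeep;            // tokens eligible for discarding
  const nDiscard = Math.floor(nLeft / 2); // drop half of them
  return {
    removeRange: [nKeep, nKeep + nDiscard] as const, // KV cells removed
    shiftRange: [nKeep + nDiscard, nPast] as const,  // KV cells shifted left by nDiscard
    shiftBy: -nDiscard,
    nPastAfter: nPast - nDiscard,
  };
}

// Example: planContextShift(4096, 64) removes cells [64, 2080), shifts [2080, 4096)
// back by 2016 positions, and generation resumes with n_past = 2080.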