@fugood/llama.node 0.3.12 → 0.3.14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/darwin/arm64/llama-node.node +0 -0
- package/bin/darwin/x64/llama-node.node +0 -0
- package/bin/linux/arm64/llama-node.node +0 -0
- package/bin/linux/x64/llama-node.node +0 -0
- package/bin/linux-cuda/arm64/llama-node.node +0 -0
- package/bin/linux-cuda/x64/llama-node.node +0 -0
- package/bin/linux-vulkan/arm64/llama-node.node +0 -0
- package/bin/linux-vulkan/x64/llama-node.node +0 -0
- package/bin/win32/arm64/llama-node.node +0 -0
- package/bin/win32/arm64/node.lib +0 -0
- package/bin/win32/x64/llama-node.node +0 -0
- package/bin/win32/x64/node.lib +0 -0
- package/bin/win32-vulkan/arm64/llama-node.node +0 -0
- package/bin/win32-vulkan/arm64/node.lib +0 -0
- package/bin/win32-vulkan/x64/llama-node.node +0 -0
- package/bin/win32-vulkan/x64/node.lib +0 -0
- package/lib/binding.ts +2 -1
- package/package.json +1 -1
- package/src/LlamaCompletionWorker.cpp +14 -0
- package/src/LlamaContext.cpp +110 -79
- package/src/LlamaContext.h +1 -1
- package/src/common.hpp +1 -2
- package/src/llama.cpp/.github/workflows/build.yml +95 -13
- package/src/llama.cpp/.github/workflows/docker.yml +2 -0
- package/src/llama.cpp/.github/workflows/labeler.yml +1 -1
- package/src/llama.cpp/.github/workflows/server.yml +2 -0
- package/src/llama.cpp/common/CMakeLists.txt +23 -6
- package/src/llama.cpp/common/arg.cpp +292 -14
- package/src/llama.cpp/common/chat.cpp +1128 -315
- package/src/llama.cpp/common/chat.h +135 -0
- package/src/llama.cpp/common/common.cpp +27 -171
- package/src/llama.cpp/common/common.h +41 -73
- package/src/llama.cpp/common/json-schema-to-grammar.cpp +4 -5
- package/src/llama.cpp/common/json-schema-to-grammar.h +0 -1
- package/src/llama.cpp/common/llguidance.cpp +3 -3
- package/src/llama.cpp/common/log.cpp +1 -0
- package/src/llama.cpp/common/log.h +2 -1
- package/src/llama.cpp/common/{chat-template.hpp → minja/chat-template.hpp} +21 -7
- package/src/llama.cpp/common/{minja.hpp → minja/minja.hpp} +61 -14
- package/src/llama.cpp/common/ngram-cache.cpp +1 -0
- package/src/llama.cpp/common/sampling.cpp +93 -49
- package/src/llama.cpp/common/speculative.cpp +6 -5
- package/src/llama.cpp/common/speculative.h +1 -1
- package/src/llama.cpp/docs/build.md +47 -9
- package/src/llama.cpp/examples/cvector-generator/cvector-generator.cpp +3 -1
- package/src/llama.cpp/examples/embedding/embedding.cpp +1 -0
- package/src/llama.cpp/examples/export-lora/export-lora.cpp +4 -2
- package/src/llama.cpp/examples/imatrix/imatrix.cpp +4 -4
- package/src/llama.cpp/examples/llama-bench/llama-bench.cpp +6 -5
- package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/llama-android.cpp +1 -1
- package/src/llama.cpp/examples/llava/CMakeLists.txt +7 -0
- package/src/llama.cpp/examples/llava/clip.cpp +373 -107
- package/src/llama.cpp/examples/llava/clip.h +19 -3
- package/src/llama.cpp/examples/llava/gemma3-cli.cpp +341 -0
- package/src/llama.cpp/examples/llava/llava.cpp +4 -2
- package/src/llama.cpp/examples/llava/minicpmv-cli.cpp +30 -11
- package/src/llama.cpp/examples/lookahead/lookahead.cpp +1 -0
- package/src/llama.cpp/examples/main/main.cpp +73 -28
- package/src/llama.cpp/examples/parallel/parallel.cpp +1 -0
- package/src/llama.cpp/examples/passkey/passkey.cpp +1 -0
- package/src/llama.cpp/examples/perplexity/perplexity.cpp +1 -0
- package/src/llama.cpp/examples/quantize/quantize.cpp +1 -0
- package/src/llama.cpp/examples/run/linenoise.cpp/linenoise.cpp +882 -237
- package/src/llama.cpp/examples/run/linenoise.cpp/linenoise.h +35 -26
- package/src/llama.cpp/examples/run/run.cpp +115 -79
- package/src/llama.cpp/examples/server/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/server/httplib.h +381 -292
- package/src/llama.cpp/examples/server/server.cpp +134 -128
- package/src/llama.cpp/examples/server/utils.hpp +95 -106
- package/src/llama.cpp/examples/sycl/run-llama2.sh +2 -2
- package/src/llama.cpp/examples/tts/tts.cpp +251 -142
- package/src/llama.cpp/ggml/CMakeLists.txt +13 -1
- package/src/llama.cpp/ggml/include/ggml-alloc.h +1 -1
- package/src/llama.cpp/ggml/include/ggml-backend.h +3 -3
- package/src/llama.cpp/ggml/include/ggml-cpu.h +4 -1
- package/src/llama.cpp/ggml/include/ggml-metal.h +1 -1
- package/src/llama.cpp/ggml/include/ggml-vulkan.h +0 -2
- package/src/llama.cpp/ggml/include/ggml.h +6 -2
- package/src/llama.cpp/ggml/src/CMakeLists.txt +10 -7
- package/src/llama.cpp/ggml/src/ggml-alloc.c +24 -15
- package/src/llama.cpp/ggml/src/ggml-backend-impl.h +1 -1
- package/src/llama.cpp/ggml/src/ggml-backend-reg.cpp +58 -54
- package/src/llama.cpp/ggml/src/ggml-backend.cpp +10 -8
- package/src/llama.cpp/ggml/src/ggml-cann/ggml-cann.cpp +3 -2
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/dup.cpp +3 -5
- package/src/llama.cpp/ggml/src/ggml-common.h +0 -2
- package/src/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt +132 -17
- package/src/llama.cpp/ggml/src/ggml-cpu/amx/amx.cpp +2 -1
- package/src/llama.cpp/ggml/src/ggml-cpu/cpu-feats-x86.cpp +4 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +2 -1
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-impl.h +156 -11
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-quants.c +2235 -641
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c +1572 -198
- package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.cpp +24 -5
- package/src/llama.cpp/ggml/src/ggml-cpu/kleidiai/kernels.cpp +259 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/kleidiai/kernels.h +61 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +288 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/kleidiai/kleidiai.h +17 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/llamafile/sgemm.cpp +9 -8
- package/src/llama.cpp/ggml/src/ggml-cuda/CMakeLists.txt +16 -3
- package/src/llama.cpp/ggml/src/ggml-hip/CMakeLists.txt +14 -0
- package/src/llama.cpp/ggml/src/ggml-impl.h +1 -1
- package/src/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt +4 -5
- package/src/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +235 -0
- package/src/llama.cpp/ggml/src/ggml-musa/CMakeLists.txt +6 -2
- package/src/llama.cpp/ggml/src/ggml-opencl/CMakeLists.txt +1 -0
- package/src/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +246 -120
- package/src/llama.cpp/ggml/src/ggml-quants.c +114 -114
- package/src/llama.cpp/ggml/src/ggml-rpc/ggml-rpc.cpp +2 -1
- package/src/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +2 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +1 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/common.cpp +17 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +51 -10
- package/src/llama.cpp/ggml/src/ggml-sycl/convert.cpp +33 -4
- package/src/llama.cpp/ggml/src/ggml-sycl/convert.hpp +2 -2
- package/src/llama.cpp/ggml/src/ggml-sycl/cpy.cpp +701 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/cpy.hpp +11 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp +55 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +136 -4
- package/src/llama.cpp/ggml/src/ggml-sycl/getrows.cpp +308 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/getrows.hpp +23 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +174 -728
- package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +75 -77
- package/src/llama.cpp/ggml/src/ggml-sycl/softmax.cpp +3 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.cpp +13 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/sycl_hw.hpp +23 -0
- package/src/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +949 -602
- package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +37 -3
- package/src/llama.cpp/ggml/src/ggml.c +9 -4
- package/src/llama.cpp/include/llama.h +32 -14
- package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.inp +112 -0
- package/src/llama.cpp/models/ggml-vocab-gpt-4o.gguf.out +46 -0
- package/src/llama.cpp/requirements/requirements-all.txt +1 -0
- package/src/llama.cpp/requirements/requirements-tool_bench.txt +12 -0
- package/src/llama.cpp/requirements.txt +1 -0
- package/src/llama.cpp/src/llama-arch.cpp +21 -0
- package/src/llama.cpp/src/llama-arch.h +1 -0
- package/src/llama.cpp/src/llama-chat.cpp +1 -0
- package/src/llama.cpp/src/llama-grammar.cpp +183 -183
- package/src/llama.cpp/src/llama-grammar.h +13 -4
- package/src/llama.cpp/src/llama-impl.h +6 -6
- package/src/llama.cpp/src/llama-kv-cache.h +2 -1
- package/src/llama.cpp/src/llama-mmap.cpp +11 -1
- package/src/llama.cpp/src/llama-mmap.h +1 -0
- package/src/llama.cpp/src/llama-model.cpp +70 -6
- package/src/llama.cpp/src/llama-sampling.cpp +174 -67
- package/src/llama.cpp/src/llama-vocab.cpp +12 -0
- package/src/llama.cpp/src/llama.cpp +154 -5
- package/src/llama.cpp/src/unicode.cpp +9 -2
- package/src/llama.cpp/tests/test-backend-ops.cpp +171 -115
- package/src/llama.cpp/tests/test-chat-template.cpp +32 -22
- package/src/llama.cpp/tests/test-chat.cpp +691 -325
- package/src/llama.cpp/tests/test-gguf.cpp +4 -4
- package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +63 -63
- package/src/llama.cpp/tests/test-quantize-fns.cpp +1 -9
- package/src/llama.cpp/tests/test-sampling.cpp +15 -0
- package/src/llama.cpp/Sources/llama/llama.h +0 -4
- package/src/llama.cpp/common/chat.hpp +0 -52
package/src/llama.cpp/examples/main/main.cpp

@@ -4,7 +4,7 @@
 #include "log.h"
 #include "sampling.h"
 #include "llama.h"
-#include "chat
+#include "chat.h"
 
 #include <cstdio>
 #include <cstring>
@@ -31,8 +31,6 @@
 #pragma warning(disable: 4244 4267) // possible loss of data
 #endif
 
-static const char * DEFAULT_SYSTEM_MESSAGE = "You are a helpful assistant";
-
 static llama_context ** g_ctx;
 static llama_model ** g_model;
 static common_sampler ** g_smpl;
@@ -47,8 +45,8 @@ static void print_usage(int argc, char ** argv) {
 (void) argc;
 
 LOG("\nexample usage:\n");
-LOG("\n text generation: %s -m your_model.gguf -p \"I believe the meaning of life is\" -n 128\n", argv[0]);
-LOG("\n chat (conversation): %s -m your_model.gguf -
+LOG("\n text generation: %s -m your_model.gguf -p \"I believe the meaning of life is\" -n 128 -no-cnv\n", argv[0]);
+LOG("\n chat (conversation): %s -m your_model.gguf -sys \"You are a helpful assistant\"\n", argv[0]);
 LOG("\n");
 }
 
@@ -158,7 +156,7 @@ int main(int argc, char ** argv) {
 }
 
 const llama_vocab * vocab = llama_model_get_vocab(model);
-auto chat_templates =
+auto chat_templates = common_chat_templates_init(model, params.chat_template);
 
 LOG_INF("%s: llama threadpool init, n_threads = %d\n", __func__, (int) params.cpuparams.n_threads);
 
@@ -201,7 +199,7 @@ int main(int argc, char ** argv) {
 }
 
 // auto enable conversation mode if chat template is available
-const bool has_chat_template = chat_templates.
+const bool has_chat_template = common_chat_templates_was_explicit(chat_templates.get());
 if (params.conversation_mode == COMMON_CONVERSATION_MODE_AUTO) {
 if (has_chat_template) {
 LOG_INF("%s: chat template is available, enabling conversation mode (disable it with -no-cnv)\n", __func__);
@@ -219,7 +217,11 @@
 // print chat template example in conversation mode
 if (params.conversation_mode) {
 if (params.enable_chat_template) {
-
+if (!params.prompt.empty() && params.system_prompt.empty()) {
+LOG_WRN("*** User-specified prompt will pre-start conversation, did you mean to set --system-prompt (-sys) instead?\n");
+}
+
+LOG_INF("%s: chat template example:\n%s\n", __func__, common_chat_format_example(chat_templates.get(), params.use_jinja).c_str());
 } else {
 LOG_INF("%s: in-suffix/prefix is specified, chat template will be disabled\n", __func__);
 }
@@ -263,21 +265,45 @@
 
 std::vector<llama_token> embd_inp;
 
+bool waiting_for_first_input = false;
 auto chat_add_and_format = [&chat_msgs, &chat_templates](const std::string & role, const std::string & content) {
-common_chat_msg new_msg
-
-
+common_chat_msg new_msg;
+new_msg.role = role;
+new_msg.content = content;
+auto formatted = common_chat_format_single(chat_templates.get(), chat_msgs, new_msg, role == "user", g_params->use_jinja);
+chat_msgs.push_back(new_msg);
 LOG_DBG("formatted: '%s'\n", formatted.c_str());
 return formatted;
 };
 
+std::string prompt;
 {
-
-
-
+if (params.conversation_mode && params.enable_chat_template) {
+if (!params.system_prompt.empty()) {
+// format the system prompt (will use template default if empty)
+chat_add_and_format("system", params.system_prompt);
+}
+
+if (!params.prompt.empty()) {
+// format and append the user prompt
+chat_add_and_format("user", params.prompt);
+} else {
+waiting_for_first_input = true;
+}
+
+if (!params.system_prompt.empty() || !params.prompt.empty()) {
+common_chat_templates_inputs inputs;
+inputs.messages = chat_msgs;
+inputs.add_generation_prompt = !params.prompt.empty();
+
+prompt = common_chat_templates_apply(chat_templates.get(), inputs).prompt;
+}
+} else {
 // otherwise use the prompt as is
-
-
+prompt = params.prompt;
+}
+
+if (params.interactive_first || !prompt.empty() || session_tokens.empty()) {
 LOG_DBG("tokenize the prompt\n");
 embd_inp = common_tokenize(ctx, prompt, true, true);
 } else {
@@ -290,7 +316,7 @@ int main(int argc, char ** argv) {
 }
 
 // Should not run without any tokens
-if (embd_inp.empty()) {
+if (!waiting_for_first_input && embd_inp.empty()) {
 if (add_bos) {
 embd_inp.push_back(llama_vocab_bos(vocab));
 LOG_WRN("embd_inp was considered empty and bos was added: %s\n", string_from(ctx, embd_inp).c_str());
@@ -350,7 +376,12 @@
 }
 
 if (params.conversation_mode) {
-params.
+if (params.single_turn && !params.prompt.empty()) {
+params.interactive = false;
+params.interactive_first = false;
+} else {
+params.interactive_first = true;
+}
 }
 
 // enable interactive mode if interactive start is specified
@@ -474,8 +505,8 @@
 LOG_INF( " - Press Ctrl+C to interject at any time.\n");
 #endif
 LOG_INF( "%s", control_message);
-if (params.conversation_mode && params.enable_chat_template && params.
-LOG_INF( " -
+if (params.conversation_mode && params.enable_chat_template && params.system_prompt.empty()) {
+LOG_INF( " - Not using system message. To change it, set a different value via -sys PROMPT\n");
 }
 LOG_INF("\n");
 
@@ -755,11 +786,14 @@
 
 // check for reverse prompt using special tokens
 llama_token last_token = common_sampler_last(smpl);
-
-if (
-
+for (auto token : antiprompt_token) {
+if (token == last_token) {
+if (params.interactive) {
+is_interacting = true;
+}
+is_antiprompt = true;
+break;
 }
-is_antiprompt = true;
 }
 
 if (is_antiprompt) {
@@ -768,7 +802,7 @@
 }
 
 // deal with end of generation tokens in interactive mode
-if (llama_vocab_is_eog(vocab, common_sampler_last(smpl))) {
+if (!waiting_for_first_input && llama_vocab_is_eog(vocab, common_sampler_last(smpl))) {
 LOG_DBG("found an EOG token\n");
 
 if (params.interactive) {
@@ -788,12 +822,17 @@
 }
 
 // if current token is not EOG, we add it to current assistant message
-if (params.conversation_mode) {
+if (params.conversation_mode && !waiting_for_first_input) {
 const auto id = common_sampler_last(smpl);
 assistant_ss << common_token_to_piece(ctx, id, false);
+
+if (!prompt.empty()) {
+prompt.clear();
+is_interacting = false;
+}
 }
 
-if (n_past > 0 && is_interacting) {
+if ((n_past > 0 || waiting_for_first_input) && is_interacting) {
 LOG_DBG("waiting for user input\n");
 
 if (params.conversation_mode) {
@@ -883,11 +922,17 @@
 input_echo = false; // do not echo this again
 }
 
-if (n_past > 0) {
+if (n_past > 0 || waiting_for_first_input) {
 if (is_interacting) {
 common_sampler_reset(smpl);
 }
 is_interacting = false;
+
+if (waiting_for_first_input && params.single_turn) {
+params.interactive = false;
+params.interactive_first = false;
+}
+waiting_for_first_input = false;
 }
 }
 
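For context on the API change surfaced by this diff: the bundled llama.cpp drops the old `common/chat.hpp` helpers (deleted above) in favor of a `common_chat_templates_*` API declared in the new `common/chat.h`, and the rewritten `main.cpp` builds its initial prompt through that API. The sketch below condenses that flow. It is a hypothetical reconstruction based only on the calls visible in the diff (`common_chat_templates_init`, `common_chat_templates_inputs`, `common_chat_templates_apply`, `common_chat_msg`); exact signatures and struct fields are assumptions, not verified declarations from `chat.h`.

```cpp
// Hypothetical sketch of the new chat-template flow used by main.cpp above.
// Types and signatures are inferred from the diff, not copied from common/chat.h.
#include <string>
#include <vector>

#include "llama.h"
#include "common.h"  // common_params (assumed to carry prompt / system_prompt / chat_template)
#include "chat.h"    // common_chat_msg, common_chat_templates_* (assumed API)

// Build the initial prompt string the way the new conversation-mode code does.
static std::string build_initial_prompt(const llama_model * model, const common_params & params) {
    // Load the model's chat template, or the user-supplied override.
    auto tmpls = common_chat_templates_init(model, params.chat_template);

    std::vector<common_chat_msg> msgs;
    if (!params.system_prompt.empty()) {
        common_chat_msg sys;
        sys.role    = "system";
        sys.content = params.system_prompt;
        msgs.push_back(sys);
    }
    if (!params.prompt.empty()) {
        common_chat_msg user;
        user.role    = "user";
        user.content = params.prompt;
        msgs.push_back(user);
    }

    // Render the accumulated messages into a single prompt string; the
    // generation prompt is only appended when a user turn is present,
    // mirroring the logic in the diff.
    common_chat_templates_inputs inputs;
    inputs.messages              = msgs;
    inputs.add_generation_prompt = !params.prompt.empty();

    return common_chat_templates_apply(tmpls.get(), inputs).prompt;
}
```

When no user prompt is given, the diff instead sets `waiting_for_first_input` and defers prompt construction until the first interactive turn, which is what the new `-sys`, `-no-cnv`, and single-turn handling shown above relies on.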