@fugood/llama.node 1.1.6 → 1.1.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. package/lib/binding.ts +4 -0
  2. package/lib/index.js +6 -1
  3. package/lib/index.ts +6 -0
  4. package/lib/version.js +5 -0
  5. package/lib/version.ts +2 -0
  6. package/package.json +14 -14
  7. package/scripts/llama.cpp.patch +9 -9
  8. package/src/LlamaCompletionWorker.cpp +73 -20
  9. package/src/LlamaCompletionWorker.h +8 -0
  10. package/src/llama.cpp/CMakeLists.txt +2 -0
  11. package/src/llama.cpp/common/arg.cpp +124 -40
  12. package/src/llama.cpp/common/chat-parser.cpp +9 -1
  13. package/src/llama.cpp/common/chat.cpp +312 -9
  14. package/src/llama.cpp/common/chat.h +4 -1
  15. package/src/llama.cpp/common/common.cpp +54 -0
  16. package/src/llama.cpp/common/common.h +41 -7
  17. package/src/llama.cpp/ggml/CMakeLists.txt +2 -0
  18. package/src/llama.cpp/ggml/include/ggml-opt.h +25 -6
  19. package/src/llama.cpp/ggml/include/ggml-zdnn.h +16 -0
  20. package/src/llama.cpp/ggml/include/ggml.h +28 -2
  21. package/src/llama.cpp/ggml/src/CMakeLists.txt +1 -0
  22. package/src/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt +1 -1
  23. package/src/llama.cpp/ggml/src/ggml-cpu/arch/x86/repack.cpp +1136 -1077
  24. package/src/llama.cpp/ggml/src/ggml-cpu/arch-fallback.h +14 -0
  25. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c +6 -0
  26. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.cpp +21 -24
  27. package/src/llama.cpp/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +16 -7
  28. package/src/llama.cpp/ggml/src/ggml-cpu/ops.cpp +63 -2
  29. package/src/llama.cpp/ggml/src/ggml-cpu/ops.h +1 -1
  30. package/src/llama.cpp/ggml/src/ggml-cpu/repack.cpp +200 -51
  31. package/src/llama.cpp/ggml/src/ggml-cpu/repack.h +11 -0
  32. package/src/llama.cpp/ggml/src/ggml-cpu/traits.cpp +2 -2
  33. package/src/llama.cpp/ggml/src/ggml-cpu/traits.h +1 -1
  34. package/src/llama.cpp/include/llama.h +25 -0
  35. package/src/llama.cpp/src/llama-batch.cpp +1 -1
  36. package/src/llama.cpp/src/llama-chat.cpp +2 -4
  37. package/src/llama.cpp/src/llama-context.cpp +29 -17
  38. package/src/llama.cpp/src/llama-context.h +6 -5
  39. package/src/llama.cpp/src/llama-kv-cache-unified-iswa.cpp +12 -6
  40. package/src/llama.cpp/src/llama-kv-cache-unified-iswa.h +2 -2
  41. package/src/llama.cpp/src/llama-kv-cache-unified.cpp +89 -69
  42. package/src/llama.cpp/src/llama-kv-cache-unified.h +2 -2
  43. package/src/llama.cpp/src/llama-memory-hybrid.cpp +6 -2
  44. package/src/llama.cpp/src/llama-memory-hybrid.h +2 -2
  45. package/src/llama.cpp/src/llama-memory-recurrent.cpp +6 -2
  46. package/src/llama.cpp/src/llama-memory-recurrent.h +2 -2
  47. package/src/llama.cpp/src/llama-memory.h +2 -2
  48. package/src/llama.cpp/src/llama-model.cpp +1 -0
  49. package/src/llama.cpp/src/llama-model.h +1 -0
  50. package/src/llama.cpp/src/llama-quant.cpp +1 -1
  51. package/src/llama.cpp/src/llama-vocab.cpp +2 -1
package/lib/binding.ts CHANGED
@@ -167,6 +167,10 @@ export type LlamaCompletionResult = {
 
 export type LlamaCompletionToken = {
   token: string
+  content?: string
+  reasoning_content?: string
+  tool_calls?: ToolCall[]
+  accumulated_text?: string
 }
 
 export type TokenizeResult = {
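
The new optional fields carry parsed partial output alongside the raw token piece. A minimal TypeScript sketch of a token handler reading them; the callback wiring around it is assumed, only the LlamaCompletionToken shape above comes from this diff:

import type { LlamaCompletionToken } from '@fugood/llama.node'

let snapshot = ''
const onToken = (data: LlamaCompletionToken) => {
  process.stdout.write(data.token) // raw token piece, always present
  if (data.reasoning_content) {
    console.log('[reasoning]', data.reasoning_content) // parsed thinking text, when available
  }
  if (data.content) {
    console.log('[content]', data.content) // parsed message content so far
  }
  if (data.accumulated_text) {
    snapshot = data.accumulated_text // full generated text to date
  }
}
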
package/lib/index.js CHANGED
@@ -23,9 +23,10 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, ge
     });
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.loadLlamaModelInfo = exports.initLlama = exports.loadModel = exports.toggleNativeLog = exports.MTMD_DEFAULT_MEDIA_MARKER = void 0;
+exports.BuildInfo = exports.loadLlamaModelInfo = exports.initLlama = exports.loadModel = exports.toggleNativeLog = exports.MTMD_DEFAULT_MEDIA_MARKER = void 0;
 exports.addNativeLogListener = addNativeLogListener;
 const binding_1 = require("./binding");
+const version_1 = require("./version");
 __exportStar(require("./binding"), exports);
 exports.MTMD_DEFAULT_MEDIA_MARKER = '<__media__>';
 const mods = {};
@@ -259,3 +260,7 @@ const loadLlamaModelInfo = (path) => __awaiter(void 0, void 0, void 0, function*
     return mods[variant].LlamaContext.loadModelInfo(path, modelInfoSkip);
 });
 exports.loadLlamaModelInfo = loadLlamaModelInfo;
+exports.BuildInfo = {
+    number: version_1.BUILD_NUMBER,
+    commit: version_1.BUILD_COMMIT,
+};
package/lib/index.ts CHANGED
@@ -17,6 +17,7 @@ import type {
   Tool,
   GGUFModelInfo,
 } from './binding'
+import { BUILD_NUMBER, BUILD_COMMIT } from './version'
 
 export * from './binding'
 
@@ -353,3 +354,8 @@ export const loadLlamaModelInfo = async (path: string): Promise<GGUFModelInfo> =
   refreshNativeLogSetup()
   return mods[variant].LlamaContext.loadModelInfo(path, modelInfoSkip)
 }
+
+export const BuildInfo = {
+  number: BUILD_NUMBER,
+  commit: BUILD_COMMIT,
+}
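
The generated version.ts constants (what appears to be the bundled llama.cpp build number and commit) are exposed as BuildInfo; a small usage sketch, assuming the package is installed as @fugood/llama.node:

import { BuildInfo } from '@fugood/llama.node'

// Prints something like: llama.cpp build 6096 (fd1234cb)
console.log(`llama.cpp build ${BuildInfo.number} (${BuildInfo.commit})`)
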
package/lib/version.js ADDED
@@ -0,0 +1,5 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.BUILD_COMMIT = exports.BUILD_NUMBER = void 0;
+exports.BUILD_NUMBER = '6096';
+exports.BUILD_COMMIT = 'fd1234cb';
package/lib/version.ts ADDED
@@ -0,0 +1,2 @@
+export const BUILD_NUMBER = '6096';
+export const BUILD_COMMIT = 'fd1234cb';
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "@fugood/llama.node",
   "access": "public",
-  "version": "1.1.6",
+  "version": "1.1.7",
   "description": "An another Node binding of llama.cpp",
   "main": "lib/index.js",
   "scripts": {
@@ -71,19 +71,19 @@
     "CMakeLists.txt"
   ],
   "optionalDependencies": {
-    "@fugood/node-llama-linux-x64": "1.1.6",
-    "@fugood/node-llama-linux-x64-vulkan": "1.1.6",
-    "@fugood/node-llama-linux-x64-cuda": "1.1.6",
-    "@fugood/node-llama-linux-arm64": "1.1.6",
-    "@fugood/node-llama-linux-arm64-vulkan": "1.1.6",
-    "@fugood/node-llama-linux-arm64-cuda": "1.1.6",
-    "@fugood/node-llama-win32-x64": "1.1.6",
-    "@fugood/node-llama-win32-x64-vulkan": "1.1.6",
-    "@fugood/node-llama-win32-x64-cuda": "1.1.6",
-    "@fugood/node-llama-win32-arm64": "1.1.6",
-    "@fugood/node-llama-win32-arm64-vulkan": "1.1.6",
-    "@fugood/node-llama-darwin-x64": "1.1.6",
-    "@fugood/node-llama-darwin-arm64": "1.1.6"
+    "@fugood/node-llama-linux-x64": "1.1.7",
+    "@fugood/node-llama-linux-x64-vulkan": "1.1.7",
+    "@fugood/node-llama-linux-x64-cuda": "1.1.7",
+    "@fugood/node-llama-linux-arm64": "1.1.7",
+    "@fugood/node-llama-linux-arm64-vulkan": "1.1.7",
+    "@fugood/node-llama-linux-arm64-cuda": "1.1.7",
+    "@fugood/node-llama-win32-x64": "1.1.7",
+    "@fugood/node-llama-win32-x64-vulkan": "1.1.7",
+    "@fugood/node-llama-win32-x64-cuda": "1.1.7",
+    "@fugood/node-llama-win32-arm64": "1.1.7",
+    "@fugood/node-llama-win32-arm64-vulkan": "1.1.7",
+    "@fugood/node-llama-darwin-x64": "1.1.7",
+    "@fugood/node-llama-darwin-arm64": "1.1.7"
   },
   "devDependencies": {
     "@babel/preset-env": "^7.24.4",
package/scripts/llama.cpp.patch CHANGED
@@ -1,5 +1,5 @@
 diff --git a/src/llama.cpp/common/chat.cpp b/src/llama.cpp/common/chat.cpp
-index 60805ab3..71b4236a 100644
+index 23d3828f9..ca48af00c 100644
 --- a/src/llama.cpp/common/chat.cpp
 +++ b/src/llama.cpp/common/chat.cpp
 @@ -6,9 +6,6 @@
@@ -30,7 +30,7 @@ index 60805ab3..71b4236a 100644
     json messages;
     json tools;
 diff --git a/src/llama.cpp/common/chat.h b/src/llama.cpp/common/chat.h
-index b014f9f0..3a868797 100644
+index d1e480c91..437e64e29 100644
 --- a/src/llama.cpp/common/chat.h
 +++ b/src/llama.cpp/common/chat.h
 @@ -9,7 +9,18 @@
@@ -54,10 +54,10 @@ index b014f9f0..3a868797 100644
 struct common_chat_tool_call {
     std::string name;
 diff --git a/src/llama.cpp/common/common.cpp b/src/llama.cpp/common/common.cpp
-index c6962d1d..ba5a4786 100644
+index 67dd5404f..909a97c66 100644
 --- a/src/llama.cpp/common/common.cpp
 +++ b/src/llama.cpp/common/common.cpp
-@@ -1116,6 +1116,7 @@ struct llama_model_params common_model_params_to_llama(common_params & params) {
+@@ -1117,6 +1117,7 @@ struct llama_model_params common_model_params_to_llama(common_params & params) {
         mparams.n_gpu_layers = params.n_gpu_layers;
     }
 
@@ -66,11 +66,11 @@ index c6962d1d..ba5a4786 100644
     mparams.split_mode = params.split_mode;
     mparams.tensor_split = params.tensor_split;
 diff --git a/src/llama.cpp/common/common.h b/src/llama.cpp/common/common.h
-index 6c1c7ee2..c3eb0552 100644
+index 75596e6b3..0e04694c8 100644
 --- a/src/llama.cpp/common/common.h
 +++ b/src/llama.cpp/common/common.h
-@@ -242,6 +242,7 @@ enum common_reasoning_format {
-};
+@@ -267,6 +267,7 @@ struct lr_opt {
+struct ggml_opt_optimizer_params common_opt_lr_pars(void * userdata);
 
 struct common_params {
 +    bool vocab_only = false;
     int32_t n_ctx = 4096; // context size
     int32_t n_batch = 2048; // logical batch size for prompt processing (must be >=32 to use BLAS)
 diff --git a/src/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt b/src/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt
-index f188d163..0c33acad 100644
+index ce0a3e128..df9300224 100644
 --- a/src/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt
 +++ b/src/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt
 @@ -106,7 +106,7 @@ function(ggml_add_cpu_backend_variant_impl tag_name)
@@ -91,7 +91,7 @@ index f188d163..0c33acad 100644
     check_cxx_compiler_flag(-mfp16-format=ieee GGML_COMPILER_SUPPORTS_FP16_FORMAT_I3E)
     if (NOT "${GGML_COMPILER_SUPPORTS_FP16_FORMAT_I3E}" STREQUAL "")
 diff --git a/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt b/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt
-index b97e7bf9..c3eb9519 100644
+index b97e7bf99..c3eb9519f 100644
 --- a/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt
 +++ b/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt
 @@ -111,7 +111,7 @@ if (Vulkan_FOUND)
package/src/LlamaCompletionWorker.cpp CHANGED
@@ -55,6 +55,32 @@ LlamaCompletionWorker::~LlamaCompletionWorker() {
   }
 }
 
+LlamaCompletionWorker::PartialOutput LlamaCompletionWorker::getPartialOutput(const std::string &generated_text) {
+  PartialOutput result;
+
+  try {
+    common_chat_syntax chat_syntax;
+    chat_syntax.format = static_cast<common_chat_format>(_chat_format);
+    chat_syntax.thinking_forced_open = _thinking_forced_open;
+
+    // Set reasoning format using the common function
+    chat_syntax.reasoning_format = common_reasoning_format_from_name(_reasoning_format);
+
+    chat_syntax.parse_tool_calls = true;
+
+    // Use is_partial=true for streaming partial output
+    common_chat_msg parsed_msg = common_chat_parse(generated_text, true, chat_syntax);
+
+    result.content = parsed_msg.content;
+    result.reasoning_content = parsed_msg.reasoning_content;
+    result.tool_calls = parsed_msg.tool_calls;
+  } catch (const std::exception &e) {
+    // If parsing fails, leave content empty - this is expected for partial content
+  }
+
+  return result;
+}
+
 void LlamaCompletionWorker::Execute() {
   _sess->get_mutex().lock();
   const auto t_main_start = ggml_time_us();
@@ -222,6 +248,13 @@ void LlamaCompletionWorker::Execute() {
 
     // sample the next token
     llama_token new_token_id = common_sampler_sample(sampling.get(), ctx, -1);
+
+    // is it an end of generation?
+    if (llama_vocab_is_eog(vocab, new_token_id)) {
+      _result.stopped_eos = true;
+      break;
+    }
+
     if (_next_token_uses_guide_token && !_guide_tokens.empty() &&
         !llama_vocab_is_control(vocab, new_token_id) &&
         !llama_vocab_is_eog(vocab, new_token_id)) {
@@ -250,21 +283,49 @@ void LlamaCompletionWorker::Execute() {
     if (_has_callback) {
       // TODO: When we got possible stop words (startsWith)
       // we should avoid calling the callback, wait for the next token
-      const char *c_token = strdup(token.c_str());
-      _tsfn.BlockingCall(c_token, [](Napi::Env env, Napi::Function jsCallback,
-                                     const char *value) {
+      struct TokenData {
+        std::string token;
+        std::string content;
+        std::string reasoning_content;
+        std::vector<common_chat_tool_call> tool_calls;
+        std::string accumulated_text;
+      };
+
+      auto partial = getPartialOutput(_result.text);
+      TokenData *token_data = new TokenData{token, partial.content, partial.reasoning_content, partial.tool_calls, _result.text};
+
+      _tsfn.BlockingCall(token_data, [](Napi::Env env, Napi::Function jsCallback,
+                                        TokenData *data) {
         auto obj = Napi::Object::New(env);
-        obj.Set("token", Napi::String::New(env, value));
-        delete value;
+        obj.Set("token", Napi::String::New(env, data->token));
+        if (!data->content.empty()) {
+          obj.Set("content", Napi::String::New(env, data->content));
+        }
+        if (!data->reasoning_content.empty()) {
+          obj.Set("reasoning_content", Napi::String::New(env, data->reasoning_content));
+        }
+        if (!data->tool_calls.empty()) {
+          Napi::Array tool_calls = Napi::Array::New(env);
+          for (size_t i = 0; i < data->tool_calls.size(); i++) {
+            const auto &tc = data->tool_calls[i];
+            Napi::Object tool_call = Napi::Object::New(env);
+            tool_call.Set("type", "function");
+            Napi::Object function = Napi::Object::New(env);
+            function.Set("name", tc.name);
+            function.Set("arguments", tc.arguments);
+            tool_call.Set("function", function);
+            if (!tc.id.empty()) {
+              tool_call.Set("id", tc.id);
+            }
+            tool_calls.Set(i, tool_call);
+          }
+          obj.Set("tool_calls", tool_calls);
+        }
+        obj.Set("accumulated_text", Napi::String::New(env, data->accumulated_text));
+        delete data;
         jsCallback.Call({obj});
       });
     }
-    // is it an end of generation?
-    if (llama_vocab_is_eog(vocab, new_token_id)) {
-      _result.stopped_eos = true;
-      // TODO: EOS token should be cut
-      break;
-    }
     // check for stop words
     if (!_stop_words.empty()) {
       const size_t stop_pos =
@@ -316,15 +377,7 @@ void LlamaCompletionWorker::OnOK() {
 
   chat_syntax.thinking_forced_open = _thinking_forced_open;
 
-  if (_reasoning_format == "deepseek") {
-    chat_syntax.reasoning_format = COMMON_REASONING_FORMAT_DEEPSEEK;
-  } else if (_reasoning_format == "deepseek-legacy") {
-    chat_syntax.reasoning_format = COMMON_REASONING_FORMAT_DEEPSEEK_LEGACY;
-  } else if (_reasoning_format == "auto") {
-    chat_syntax.reasoning_format = COMMON_REASONING_FORMAT_AUTO;
-  } else {
-    chat_syntax.reasoning_format = COMMON_REASONING_FORMAT_NONE;
-  }
+  chat_syntax.reasoning_format = common_reasoning_format_from_name(_reasoning_format);
   common_chat_msg message = common_chat_parse(
       _result.text,
       false,
package/src/LlamaCompletionWorker.h CHANGED
@@ -42,6 +42,14 @@ protected:
   void OnError(const Napi::Error &err) override;
 
 private:
+  struct PartialOutput {
+    std::string content = "";
+    std::string reasoning_content = "";
+    std::vector<common_chat_tool_call> tool_calls;
+  };
+
+  PartialOutput getPartialOutput(const std::string &generated_text);
+
   LlamaSessionPtr _sess;
   common_params _params;
   std::vector<std::string> _stop_words;
package/src/llama.cpp/CMakeLists.txt CHANGED
@@ -12,6 +12,8 @@ if (NOT XCODE AND NOT MSVC AND NOT CMAKE_BUILD_TYPE)
     set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo")
 endif()
 
+message("CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}")
+
 # Add path to modules
 list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/")
 
package/src/llama.cpp/common/arg.cpp CHANGED
@@ -749,6 +749,39 @@ std::pair<long, std::vector<char>> common_remote_get_content(const std::string &
 // utils
 //
 
+// Helper function to parse tensor buffer override strings
+static void parse_tensor_buffer_overrides(const std::string & value, std::vector<llama_model_tensor_buft_override> & overrides) {
+    std::map<std::string, ggml_backend_buffer_type_t> buft_list;
+    for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
+        auto * dev = ggml_backend_dev_get(i);
+        auto * buft = ggml_backend_dev_buffer_type(dev);
+        if (buft) {
+            buft_list[ggml_backend_buft_name(buft)] = buft;
+        }
+    }
+
+    for (const auto & override : string_split<std::string>(value, ',')) {
+        std::string::size_type pos = override.find('=');
+        if (pos == std::string::npos) {
+            throw std::invalid_argument("invalid value");
+        }
+        std::string tensor_name = override.substr(0, pos);
+        std::string buffer_type = override.substr(pos + 1);
+
+        if (buft_list.find(buffer_type) == buft_list.end()) {
+            printf("Available buffer types:\n");
+            for (const auto & it : buft_list) {
+                printf("  %s\n", ggml_backend_buft_name(it.second));
+            }
+            throw std::invalid_argument("unknown buffer type");
+        }
+        // keep strings alive and avoid leaking memory by storing them in a static vector
+        static std::list<std::string> buft_overrides;
+        buft_overrides.push_back(tensor_name);
+        overrides.push_back({buft_overrides.back().c_str(), buft_list.at(buffer_type)});
+    }
+}
+
 struct handle_model_result {
     bool found_mmproj = false;
     common_params_model mmproj;
@@ -993,6 +1026,10 @@ static bool common_params_parse_ex(int argc, char ** argv, common_params_context
         params.tensor_buft_overrides.push_back({nullptr, nullptr});
     }
 
+    if (!params.speculative.tensor_buft_overrides.empty()) {
+        params.speculative.tensor_buft_overrides.push_back({nullptr, nullptr});
+    }
+
     if (!params.chat_template.empty() && !common_chat_verify_template(params.chat_template, params.use_jinja)) {
         throw std::runtime_error(string_format(
             "error: the supplied chat template is not supported: %s%s\n",
@@ -1201,6 +1238,7 @@ bool common_params_parse(int argc, char ** argv, common_params & params, llama_e
             common_params_print_completion(ctx_arg);
             exit(0);
         }
+        params.lr.init();
     } catch (const std::invalid_argument & ex) {
         fprintf(stderr, "%s\n", ex.what());
         ctx_arg.params = params_org;
@@ -1469,6 +1507,14 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.swa_full = true;
         }
     ).set_env("LLAMA_ARG_SWA_FULL"));
+    add_opt(common_arg(
+        {"--swa-checkpoints"}, "N",
+        string_format("max number of SWA checkpoints per slot to create (default: %d)\n"
+            "[(more info)](https://github.com/ggml-org/llama.cpp/pull/15293)", params.n_swa_checkpoints),
+        [](common_params & params, int value) {
+            params.n_swa_checkpoints = value;
+        }
+    ).set_env("LLAMA_ARG_SWA_CHECKPOINTS").set_examples({LLAMA_EXAMPLE_SERVER}));
     add_opt(common_arg(
         {"--kv-unified", "-kvu"},
         string_format("use single unified KV buffer for the KV cache of all sequences (default: %s)\n"
@@ -2349,40 +2395,15 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
     add_opt(common_arg(
         {"--override-tensor", "-ot"}, "<tensor name pattern>=<buffer type>,...",
         "override tensor buffer type", [](common_params & params, const std::string & value) {
-            /* static */ std::map<std::string, ggml_backend_buffer_type_t> buft_list;
-            if (buft_list.empty()) {
-                // enumerate all the devices and add their buffer types to the list
-                for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
-                    auto * dev = ggml_backend_dev_get(i);
-                    auto * buft = ggml_backend_dev_buffer_type(dev);
-                    if (buft) {
-                        buft_list[ggml_backend_buft_name(buft)] = buft;
-                    }
-                }
-            }
-
-            for (const auto & override : string_split<std::string>(value, ',')) {
-                std::string::size_type pos = override.find('=');
-                if (pos == std::string::npos) {
-                    throw std::invalid_argument("invalid value");
-                }
-                std::string tensor_name = override.substr(0, pos);
-                std::string buffer_type = override.substr(pos + 1);
-
-                if (buft_list.find(buffer_type) == buft_list.end()) {
-                    printf("Available buffer types:\n");
-                    for (const auto & it : buft_list) {
-                        printf("  %s\n", ggml_backend_buft_name(it.second));
-                    }
-                    throw std::invalid_argument("unknown buffer type");
-                }
-                // keep strings alive and avoid leaking memory by storing them in a static vector
-                static std::list<std::string> buft_overrides;
-                buft_overrides.push_back(tensor_name);
-                params.tensor_buft_overrides.push_back({buft_overrides.back().c_str(), buft_list.at(buffer_type)});
-            }
+            parse_tensor_buffer_overrides(value, params.tensor_buft_overrides);
         }
     ));
+    add_opt(common_arg(
+        {"--override-tensor-draft", "-otd"}, "<tensor name pattern>=<buffer type>,...",
+        "override tensor buffer type for draft model", [](common_params & params, const std::string & value) {
+            parse_tensor_buffer_overrides(value, params.speculative.tensor_buft_overrides);
+        }
+    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}));
     add_opt(common_arg(
         {"--cpu-moe", "-cmoe"},
         "keep all Mixture of Experts (MoE) weights in the CPU",
@@ -2405,6 +2426,27 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             }
         }
     ).set_env("LLAMA_ARG_N_CPU_MOE"));
+    add_opt(common_arg(
+        {"--cpu-moe-draft", "-cmoed"},
+        "keep all Mixture of Experts (MoE) weights in the CPU for the draft model",
+        [](common_params & params) {
+            params.speculative.tensor_buft_overrides.push_back({"\\.ffn_(up|down|gate)_exps", ggml_backend_cpu_buffer_type()});
+        }
+    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CPU_MOE_DRAFT"));
+    add_opt(common_arg(
+        {"--n-cpu-moe-draft", "-ncmoed"}, "N",
+        "keep the Mixture of Experts (MoE) weights of the first N layers in the CPU for the draft model",
+        [](common_params & params, int value) {
+            if (value < 0) {
+                throw std::invalid_argument("invalid value");
+            }
+            for (int i = 0; i < value; ++i) {
+                static std::list<std::string> buft_overrides_draft;
+                buft_overrides_draft.push_back(string_format("blk\\.%d\\.ffn_(up|down|gate)_exps", i));
+                params.speculative.tensor_buft_overrides.push_back({buft_overrides_draft.back().c_str(), ggml_backend_cpu_buffer_type()});
+            }
+        }
+    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_N_CPU_MOE_DRAFT"));
     add_opt(common_arg(
         {"-ngl", "--gpu-layers", "--n-gpu-layers"}, "N",
         "number of layers to store in VRAM",
@@ -2655,7 +2697,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         [](common_params & params, const std::string & value) {
             params.out_file = value;
         }
-    ).set_examples({LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_CVECTOR_GENERATOR, LLAMA_EXAMPLE_EXPORT_LORA, LLAMA_EXAMPLE_TTS}));
+    ).set_examples({LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_CVECTOR_GENERATOR, LLAMA_EXAMPLE_EXPORT_LORA, LLAMA_EXAMPLE_TTS, LLAMA_EXAMPLE_FINETUNE}));
     add_opt(common_arg(
         {"-ofreq", "--output-frequency"}, "N",
         string_format("output the imatrix every N iterations (default: %d)", params.n_out_freq),
@@ -2949,11 +2991,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             "- deepseek: puts thoughts in `message.reasoning_content` (except in streaming mode, which behaves as `none`)\n"
             "(default: auto)",
         [](common_params & params, const std::string & value) {
-            /**/ if (value == "deepseek") { params.reasoning_format = COMMON_REASONING_FORMAT_DEEPSEEK; }
-            else if (value == "deepseek-legacy") { params.reasoning_format = COMMON_REASONING_FORMAT_DEEPSEEK_LEGACY; }
-            else if (value == "none") { params.reasoning_format = COMMON_REASONING_FORMAT_NONE; }
-            else if (value == "auto") { params.reasoning_format = COMMON_REASONING_FORMAT_AUTO; }
-            else { throw std::invalid_argument("invalid value"); }
+            params.reasoning_format = common_reasoning_format_from_name(value);
         }
     ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MAIN}).set_env("LLAMA_ARG_THINK"));
     add_opt(common_arg(
@@ -3134,7 +3172,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
                 params.speculative.cpuparams.n_threads = std::thread::hardware_concurrency();
             }
         }
-    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
+    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}));
     add_opt(common_arg(
         {"-tbd", "--threads-batch-draft"}, "N",
         "number of threads to use during batch and prompt processing (default: same as --threads-draft)",
@@ -3144,7 +3182,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
                 params.speculative.cpuparams_batch.n_threads = std::thread::hardware_concurrency();
             }
         }
-    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}));
+    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}));
     add_opt(common_arg(
         {"-Cd", "--cpu-mask-draft"}, "M",
         "Draft model CPU affinity mask. Complements cpu-range-draft (default: same as --cpu-mask)",
@@ -3537,5 +3575,51 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
     ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
 
 
+    add_opt(
+        common_arg({ "-lr", "--learning-rate" }, "ALPHA",
+            string_format(
+                "adamw or sgd optimizer alpha (default: %.2g); note: sgd alpha recommended ~10x (no momentum)",
+                (double) params.lr.lr0),
+            [](common_params & params, const std::string & value) { params.lr.lr0 = std::stof(value); })
+            .set_examples({ LLAMA_EXAMPLE_FINETUNE }));
+    add_opt(
+        common_arg({ "-lr-min", "--learning-rate-min" }, "ALPHA",
+            string_format(
+                "(if >0) final learning rate after decay (if -decay-epochs is set, default=%.2g)",
+                (double) params.lr.lr_min),
+            [](common_params & params, const std::string & value) { params.lr.lr_min = std::stof(value); })
+            .set_examples({ LLAMA_EXAMPLE_FINETUNE }));
+    add_opt(
+        common_arg({ "-decay-epochs", "--learning-rate-decay-epochs" }, "ALPHA",
+            string_format(
+                "(if >0) decay learning rate to -lr-min after this many epochs (exponential decay, default=%.2g)",
+                (double) params.lr.decay_epochs),
+            [](common_params & params, const std::string & value) { params.lr.decay_epochs = std::stof(value); })
+            .set_examples({ LLAMA_EXAMPLE_FINETUNE }));
+    add_opt(common_arg(
+        { "-wd", "--weight-decay" }, "WD",
+        string_format(
+            "adamw or sgd optimizer weight decay (0 is off; recommend very small e.g. 1e-9) (default: %.2g).",
+            (double) params.lr.wd),
+        [](common_params & params, const std::string & value) { params.lr.wd = std::stof(value); })
+        .set_examples({ LLAMA_EXAMPLE_FINETUNE }));
+    add_opt(common_arg({ "-val-split", "--val-split" }, "FRACTION",
+        string_format("fraction of data to use as validation set for training (default: %.2g).",
+            (double) params.val_split),
+        [](common_params & params, const std::string & value) { params.val_split = std::stof(value); })
+        .set_examples({ LLAMA_EXAMPLE_FINETUNE }));
+    add_opt(common_arg({ "-epochs", "--epochs" }, "N",
+        string_format("optimizer max # of epochs (default: %d)", params.lr.epochs),
+        [](common_params & params, int epochs) { params.lr.epochs = epochs; })
+        .set_examples({ LLAMA_EXAMPLE_FINETUNE }));
+    add_opt(common_arg({ "-opt", "--optimizer" }, "sgd|adamw", "adamw or sgd",
+        [](common_params & params, const std::string & name) {
+            params.optimizer = common_opt_get_optimizer(name.c_str());
+            if (params.optimizer == GGML_OPT_OPTIMIZER_TYPE_COUNT) {
+                throw std::invalid_argument("invalid --optimizer, valid options: adamw, sgd");
+            }
+        })
+        .set_examples({ LLAMA_EXAMPLE_FINETUNE }));
+
     return ctx_arg;
 }
@@ -55,7 +55,15 @@ bool common_chat_msg_parser::add_tool_call(const std::string & name, const std::
55
55
  bool common_chat_msg_parser::add_tool_call(const json & tool_call) {
56
56
  std::string name = tool_call.contains("name") ? tool_call.at("name") : "";
57
57
  std::string id = tool_call.contains("id") ? tool_call.at("id") : "";
58
- std::string arguments = tool_call.contains("arguments") ? tool_call.at("arguments") : "";
58
+ std::string arguments = "";
59
+ if (tool_call.contains("arguments")) {
60
+ if (tool_call.at("arguments").is_object()) {
61
+ arguments = tool_call.at("arguments").dump();
62
+ } else {
63
+ arguments = tool_call.at("arguments");
64
+ }
65
+ }
66
+
59
67
  return add_tool_call(name, id, arguments);
60
68
  }
61
69