@fugood/llama.node 0.3.17 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (193)
  1. package/CMakeLists.txt +3 -1
  2. package/bin/darwin/arm64/llama-node.node +0 -0
  3. package/bin/darwin/x64/llama-node.node +0 -0
  4. package/bin/linux/arm64/llama-node.node +0 -0
  5. package/bin/linux/x64/llama-node.node +0 -0
  6. package/bin/linux-cuda/arm64/llama-node.node +0 -0
  7. package/bin/linux-cuda/x64/llama-node.node +0 -0
  8. package/bin/linux-vulkan/arm64/llama-node.node +0 -0
  9. package/bin/linux-vulkan/x64/llama-node.node +0 -0
  10. package/bin/win32/arm64/llama-node.node +0 -0
  11. package/bin/win32/arm64/node.lib +0 -0
  12. package/bin/win32/x64/llama-node.node +0 -0
  13. package/bin/win32/x64/node.lib +0 -0
  14. package/bin/win32-vulkan/arm64/llama-node.node +0 -0
  15. package/bin/win32-vulkan/arm64/node.lib +0 -0
  16. package/bin/win32-vulkan/x64/llama-node.node +0 -0
  17. package/bin/win32-vulkan/x64/node.lib +0 -0
  18. package/lib/binding.ts +39 -2
  19. package/lib/index.js +132 -1
  20. package/lib/index.ts +203 -3
  21. package/package.json +2 -1
  22. package/src/EmbeddingWorker.cpp +1 -1
  23. package/src/LlamaCompletionWorker.cpp +366 -19
  24. package/src/LlamaCompletionWorker.h +30 -10
  25. package/src/LlamaContext.cpp +213 -5
  26. package/src/LlamaContext.h +12 -0
  27. package/src/common.hpp +15 -0
  28. package/src/llama.cpp/.github/workflows/build-linux-cross.yml +133 -24
  29. package/src/llama.cpp/.github/workflows/build.yml +41 -762
  30. package/src/llama.cpp/.github/workflows/docker.yml +5 -2
  31. package/src/llama.cpp/.github/workflows/release.yml +716 -0
  32. package/src/llama.cpp/.github/workflows/server.yml +12 -12
  33. package/src/llama.cpp/CMakeLists.txt +5 -17
  34. package/src/llama.cpp/cmake/build-info.cmake +8 -2
  35. package/src/llama.cpp/cmake/x64-windows-llvm.cmake +0 -6
  36. package/src/llama.cpp/common/CMakeLists.txt +31 -3
  37. package/src/llama.cpp/common/arg.cpp +48 -29
  38. package/src/llama.cpp/common/chat.cpp +128 -106
  39. package/src/llama.cpp/common/chat.h +2 -0
  40. package/src/llama.cpp/common/common.cpp +37 -1
  41. package/src/llama.cpp/common/common.h +18 -9
  42. package/src/llama.cpp/common/llguidance.cpp +1 -0
  43. package/src/llama.cpp/common/minja/chat-template.hpp +9 -5
  44. package/src/llama.cpp/common/minja/minja.hpp +69 -36
  45. package/src/llama.cpp/common/regex-partial.cpp +204 -0
  46. package/src/llama.cpp/common/regex-partial.h +56 -0
  47. package/src/llama.cpp/common/sampling.cpp +57 -50
  48. package/src/llama.cpp/examples/CMakeLists.txt +2 -23
  49. package/src/llama.cpp/examples/embedding/embedding.cpp +2 -11
  50. package/src/llama.cpp/examples/parallel/parallel.cpp +86 -14
  51. package/src/llama.cpp/examples/training/CMakeLists.txt +5 -0
  52. package/src/llama.cpp/examples/training/finetune.cpp +96 -0
  53. package/src/llama.cpp/ggml/CMakeLists.txt +27 -0
  54. package/src/llama.cpp/ggml/include/ggml-backend.h +4 -4
  55. package/src/llama.cpp/ggml/include/ggml-cpp.h +1 -1
  56. package/src/llama.cpp/ggml/include/ggml-opt.h +47 -28
  57. package/src/llama.cpp/ggml/include/ggml.h +10 -7
  58. package/src/llama.cpp/ggml/src/CMakeLists.txt +1 -1
  59. package/src/llama.cpp/ggml/src/ggml-alloc.c +4 -1
  60. package/src/llama.cpp/ggml/src/ggml-backend.cpp +9 -5
  61. package/src/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt +20 -13
  62. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +0 -2
  63. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-quants.c +306 -6
  64. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c +4 -13
  65. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.cpp +29 -16
  66. package/src/llama.cpp/ggml/src/ggml-cpu/kleidiai/kernels.cpp +88 -5
  67. package/src/llama.cpp/ggml/src/ggml-cpu/kleidiai/kernels.h +47 -12
  68. package/src/llama.cpp/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +264 -69
  69. package/src/llama.cpp/ggml/src/ggml-cpu/llamafile/sgemm.cpp +501 -0
  70. package/src/llama.cpp/ggml/src/ggml-cpu/ops.cpp +0 -13
  71. package/src/llama.cpp/ggml/src/ggml-cpu/vec.cpp +0 -6
  72. package/src/llama.cpp/ggml/src/ggml-cuda/CMakeLists.txt +23 -4
  73. package/src/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +36 -11
  74. package/src/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +0 -2
  75. package/src/llama.cpp/ggml/src/ggml-opt.cpp +368 -190
  76. package/src/llama.cpp/ggml/src/ggml-quants.c +0 -6
  77. package/src/llama.cpp/ggml/src/ggml-rpc/ggml-rpc.cpp +41 -27
  78. package/src/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +29 -23
  79. package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +9 -8
  80. package/src/llama.cpp/ggml/src/ggml-sycl/binbcast.cpp +121 -232
  81. package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +7 -15
  82. package/src/llama.cpp/ggml/src/ggml-sycl/convert.cpp +72 -25
  83. package/src/llama.cpp/ggml/src/ggml-sycl/convert.hpp +14 -7
  84. package/src/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp +59 -21
  85. package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +7 -1
  86. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +0 -23
  87. package/src/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +37 -8
  88. package/src/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +338 -166
  89. package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +185 -89
  90. package/src/llama.cpp/ggml/src/ggml-sycl/quants.hpp +83 -0
  91. package/src/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +128 -53
  92. package/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +81 -70
  93. package/src/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +657 -193
  94. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +20 -0
  95. package/src/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +123 -29
  96. package/src/llama.cpp/ggml/src/ggml.c +29 -20
  97. package/src/llama.cpp/ggml/src/gguf.cpp +33 -33
  98. package/src/llama.cpp/include/llama.h +52 -11
  99. package/src/llama.cpp/requirements/requirements-all.txt +3 -3
  100. package/src/llama.cpp/scripts/xxd.cmake +1 -1
  101. package/src/llama.cpp/src/CMakeLists.txt +1 -0
  102. package/src/llama.cpp/src/llama-adapter.cpp +6 -0
  103. package/src/llama.cpp/src/llama-arch.cpp +3 -0
  104. package/src/llama.cpp/src/llama-batch.cpp +5 -1
  105. package/src/llama.cpp/src/llama-batch.h +2 -1
  106. package/src/llama.cpp/src/llama-chat.cpp +17 -7
  107. package/src/llama.cpp/src/llama-chat.h +1 -0
  108. package/src/llama.cpp/src/llama-context.cpp +389 -501
  109. package/src/llama.cpp/src/llama-context.h +44 -32
  110. package/src/llama.cpp/src/llama-cparams.h +1 -0
  111. package/src/llama.cpp/src/llama-graph.cpp +20 -38
  112. package/src/llama.cpp/src/llama-graph.h +12 -8
  113. package/src/llama.cpp/src/llama-kv-cache.cpp +1503 -389
  114. package/src/llama.cpp/src/llama-kv-cache.h +271 -85
  115. package/src/llama.cpp/src/llama-memory.h +11 -1
  116. package/src/llama.cpp/src/llama-model-loader.cpp +24 -15
  117. package/src/llama.cpp/src/llama-model-saver.cpp +281 -0
  118. package/src/llama.cpp/src/llama-model-saver.h +37 -0
  119. package/src/llama.cpp/src/llama-model.cpp +316 -69
  120. package/src/llama.cpp/src/llama-model.h +8 -1
  121. package/src/llama.cpp/src/llama-quant.cpp +15 -13
  122. package/src/llama.cpp/src/llama-sampling.cpp +18 -6
  123. package/src/llama.cpp/src/llama-vocab.cpp +42 -4
  124. package/src/llama.cpp/src/llama-vocab.h +6 -0
  125. package/src/llama.cpp/src/llama.cpp +14 -0
  126. package/src/llama.cpp/tests/CMakeLists.txt +10 -2
  127. package/src/llama.cpp/tests/test-backend-ops.cpp +107 -47
  128. package/src/llama.cpp/tests/test-chat-template.cpp +10 -11
  129. package/src/llama.cpp/tests/test-chat.cpp +3 -1
  130. package/src/llama.cpp/tests/test-mtmd-c-api.c +63 -0
  131. package/src/llama.cpp/tests/test-opt.cpp +33 -21
  132. package/src/llama.cpp/tests/test-regex-partial.cpp +288 -0
  133. package/src/llama.cpp/tests/test-sampling.cpp +1 -1
  134. package/src/llama.cpp/tools/CMakeLists.txt +39 -0
  135. package/src/llama.cpp/{examples → tools}/batched-bench/batched-bench.cpp +2 -2
  136. package/src/llama.cpp/{examples → tools}/imatrix/imatrix.cpp +11 -9
  137. package/src/llama.cpp/{examples → tools}/llama-bench/llama-bench.cpp +495 -348
  138. package/src/llama.cpp/{examples → tools}/main/main.cpp +6 -9
  139. package/src/llama.cpp/{examples/llava → tools/mtmd}/CMakeLists.txt +1 -35
  140. package/src/llama.cpp/{examples/llava → tools/mtmd}/clip-impl.h +25 -5
  141. package/src/llama.cpp/{examples/llava → tools/mtmd}/clip.cpp +1440 -1349
  142. package/src/llama.cpp/tools/mtmd/clip.h +99 -0
  143. package/src/llama.cpp/{examples/llava → tools/mtmd}/mtmd-cli.cpp +70 -44
  144. package/src/llama.cpp/tools/mtmd/mtmd-helper.cpp +310 -0
  145. package/src/llama.cpp/{examples/llava → tools/mtmd}/mtmd.cpp +251 -281
  146. package/src/llama.cpp/tools/mtmd/mtmd.h +331 -0
  147. package/src/llama.cpp/{examples → tools}/perplexity/perplexity.cpp +4 -2
  148. package/src/llama.cpp/{examples → tools}/quantize/quantize.cpp +13 -76
  149. package/src/llama.cpp/{examples → tools}/rpc/rpc-server.cpp +70 -74
  150. package/src/llama.cpp/{examples → tools}/run/run.cpp +18 -4
  151. package/src/llama.cpp/{examples → tools}/server/CMakeLists.txt +2 -1
  152. package/src/llama.cpp/{examples → tools}/server/server.cpp +291 -76
  153. package/src/llama.cpp/{examples → tools}/server/utils.hpp +377 -5
  154. package/src/llama.cpp/cmake/arm64-windows-msvc.cmake +0 -6
  155. package/src/llama.cpp/examples/infill/CMakeLists.txt +0 -5
  156. package/src/llama.cpp/examples/infill/infill.cpp +0 -590
  157. package/src/llama.cpp/examples/llava/android/build_64.sh +0 -8
  158. package/src/llama.cpp/examples/llava/clip-quantize-cli.cpp +0 -59
  159. package/src/llama.cpp/examples/llava/clip.h +0 -135
  160. package/src/llama.cpp/examples/llava/llava.cpp +0 -586
  161. package/src/llama.cpp/examples/llava/llava.h +0 -49
  162. package/src/llama.cpp/examples/llava/mtmd.h +0 -168
  163. package/src/llama.cpp/examples/llava/qwen2vl-test.cpp +0 -636
  164. /package/src/llama.cpp/{examples → tools}/batched-bench/CMakeLists.txt +0 -0
  165. /package/src/llama.cpp/{examples → tools}/cvector-generator/CMakeLists.txt +0 -0
  166. /package/src/llama.cpp/{examples → tools}/cvector-generator/completions.txt +0 -0
  167. /package/src/llama.cpp/{examples → tools}/cvector-generator/cvector-generator.cpp +0 -0
  168. /package/src/llama.cpp/{examples → tools}/cvector-generator/mean.hpp +0 -0
  169. /package/src/llama.cpp/{examples → tools}/cvector-generator/negative.txt +0 -0
  170. /package/src/llama.cpp/{examples → tools}/cvector-generator/pca.hpp +0 -0
  171. /package/src/llama.cpp/{examples → tools}/cvector-generator/positive.txt +0 -0
  172. /package/src/llama.cpp/{examples → tools}/export-lora/CMakeLists.txt +0 -0
  173. /package/src/llama.cpp/{examples → tools}/export-lora/export-lora.cpp +0 -0
  174. /package/src/llama.cpp/{examples → tools}/gguf-split/CMakeLists.txt +0 -0
  175. /package/src/llama.cpp/{examples → tools}/gguf-split/gguf-split.cpp +0 -0
  176. /package/src/llama.cpp/{examples → tools}/imatrix/CMakeLists.txt +0 -0
  177. /package/src/llama.cpp/{examples → tools}/llama-bench/CMakeLists.txt +0 -0
  178. /package/src/llama.cpp/{examples → tools}/main/CMakeLists.txt +0 -0
  179. /package/src/llama.cpp/{examples/llava → tools/mtmd}/deprecation-warning.cpp +0 -0
  180. /package/src/llama.cpp/{examples/llava → tools/mtmd}/requirements.txt +0 -0
  181. /package/src/llama.cpp/{examples → tools}/perplexity/CMakeLists.txt +0 -0
  182. /package/src/llama.cpp/{examples → tools}/quantize/CMakeLists.txt +0 -0
  183. /package/src/llama.cpp/{examples → tools}/rpc/CMakeLists.txt +0 -0
  184. /package/src/llama.cpp/{examples → tools}/run/CMakeLists.txt +0 -0
  185. /package/src/llama.cpp/{examples → tools}/run/linenoise.cpp/linenoise.cpp +0 -0
  186. /package/src/llama.cpp/{examples → tools}/run/linenoise.cpp/linenoise.h +0 -0
  187. /package/src/llama.cpp/{examples → tools}/server/bench/requirements.txt +0 -0
  188. /package/src/llama.cpp/{examples → tools}/server/httplib.h +0 -0
  189. /package/src/llama.cpp/{examples → tools}/server/tests/requirements.txt +0 -0
  190. /package/src/llama.cpp/{examples → tools}/tokenize/CMakeLists.txt +0 -0
  191. /package/src/llama.cpp/{examples → tools}/tokenize/tokenize.cpp +0 -0
  192. /package/src/llama.cpp/{examples → tools}/tts/CMakeLists.txt +0 -0
  193. /package/src/llama.cpp/{examples → tools}/tts/tts.cpp +0 -0
package/src/llama.cpp/ggml/src/gguf.cpp

@@ -299,10 +299,10 @@ bool gguf_read_emplace_helper(const struct gguf_reader & gr, std::vector<struct
  return false;
  }
  } catch (std::length_error &) {
- fprintf(stderr, "%s: encountered length_error while reading value for key '%s'\n", __func__, key.c_str());
+ GGML_LOG_ERROR("%s: encountered length_error while reading value for key '%s'\n", __func__, key.c_str());
  return false;
  } catch (std::bad_alloc &) {
- fprintf(stderr, "%s: encountered bad_alloc error while reading value for key '%s'\n", __func__, key.c_str());
+ GGML_LOG_ERROR("%s: encountered bad_alloc error while reading value for key '%s'\n", __func__, key.c_str());
  return false;
  }
  kv.emplace_back(key, value);
@@ -328,14 +328,14 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par
  ok = ok && gr.read(magic, 4);

  if (!ok) {
- fprintf(stderr, "%s: failed to read magic\n", __func__);
+ GGML_LOG_ERROR("%s: failed to read magic\n", __func__);
  gguf_free(ctx);
  return nullptr;
  }

  for (uint32_t i = 0; i < magic.size(); i++) {
  if (magic[i] != GGUF_MAGIC[i]) {
- fprintf(stderr, "%s: invalid magic characters: '%c%c%c%c', expected 'GGUF'\n", __func__, magic[0], magic[1], magic[2], magic[3]);
+ GGML_LOG_ERROR("%s: invalid magic characters: '%c%c%c%c', expected 'GGUF'\n", __func__, magic[0], magic[1], magic[2], magic[3]);
  gguf_free(ctx);
  return nullptr;
  }
@@ -348,11 +348,11 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par

  if (ok && gr.read(ctx->version)) {
  if (ctx->version == 1) {
- fprintf(stderr, "%s: GGUFv1 is no longer supported, please use a more up-to-date version\n", __func__);
+ GGML_LOG_ERROR("%s: GGUFv1 is no longer supported, please use a more up-to-date version\n", __func__);
  ok = false;
  }
  if (ctx->version > GGUF_VERSION) {
- fprintf(stderr, "%s: this GGUF file is version %" PRIu32 " but this software only supports up to version %d\n",
+ GGML_LOG_ERROR("%s: this GGUF file is version %" PRIu32 " but this software only supports up to version %d\n",
  __func__, ctx->version, GGUF_VERSION);
  ok = false;
  }
@@ -363,7 +363,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par
  if (ok && gr.read(n_tensors)) {
  static_assert(sizeof(size_t) <= 8 && sizeof(gguf_tensor_info) >= 2, "int64_t insufficient for indexing");
  if (n_tensors < 0 || n_tensors > int64_t(SIZE_MAX/sizeof(gguf_tensor_info))) {
- fprintf(stderr, "%s: number of tensors is %" PRIi64 " but must be in [0, %zu]\n",
+ GGML_LOG_ERROR("%s: number of tensors is %" PRIi64 " but must be in [0, %zu]\n",
  __func__, n_tensors, SIZE_MAX/sizeof(gguf_tensor_info));
  ok = false;
  }
@@ -374,7 +374,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par
  if (ok && gr.read(n_kv)) {
  static_assert(sizeof(size_t) <= 8 && sizeof(gguf_tensor_info) >= 2, "int64_t insufficient for indexing");
  if (n_kv < 0 || n_kv > int64_t(SIZE_MAX/sizeof(gguf_kv))) {
- fprintf(stderr, "%s: number of key value pairs is %" PRIi64 " but must be in [0, %zu]\n",
+ GGML_LOG_ERROR("%s: number of key value pairs is %" PRIi64 " but must be in [0, %zu]\n",
  __func__, n_kv, SIZE_MAX/sizeof(gguf_kv));
  ok = false;
  }
@@ -383,7 +383,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par
  }

  if (!ok) {
- fprintf(stderr, "%s: failed to read header\n", __func__);
+ GGML_LOG_ERROR("%s: failed to read header\n", __func__);
  gguf_free(ctx);
  return nullptr;
  }
@@ -399,15 +399,15 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par
  try {
  ok = ok && gr.read(key);
  } catch (std::length_error &) {
- fprintf(stderr, "%s: encountered length_error while reading key %" PRIi64 "\n", __func__, i);
+ GGML_LOG_ERROR("%s: encountered length_error while reading key %" PRIi64 "\n", __func__, i);
  ok = false;
  } catch (std::bad_alloc &) {
- fprintf(stderr, "%s: encountered bad_alloc error while reading key %" PRIi64 "\n", __func__, i);
+ GGML_LOG_ERROR("%s: encountered bad_alloc error while reading key %" PRIi64 "\n", __func__, i);
  ok = false;
  }
  for (size_t j = 0; ok && j < ctx->kv.size(); ++j) {
  if (key == ctx->kv[j].key) {
- fprintf(stderr, "%s: duplicate key '%s' for tensors %zu and %" PRIi64 " \n", __func__, key.c_str(), j, i);
+ GGML_LOG_ERROR("%s: duplicate key '%s' for tensors %zu and %" PRIi64 " \n", __func__, key.c_str(), j, i);
  ok = false;
  }
  }
@@ -441,14 +441,14 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par
  case GGUF_TYPE_ARRAY:
  default:
  {
- fprintf(stderr, "%s: key '%s' has invalid GGUF type %d\n", __func__, key.c_str(), type);
+ GGML_LOG_ERROR("%s: key '%s' has invalid GGUF type %d\n", __func__, key.c_str(), type);
  ok = false;
  } break;
  }
  }

  if (!ok) {
- fprintf(stderr, "%s: failed to read key-value pairs\n", __func__);
+ GGML_LOG_ERROR("%s: failed to read key-value pairs\n", __func__);
  gguf_free(ctx);
  return nullptr;
  }
@@ -458,7 +458,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par
  ctx->alignment = alignment_idx == -1 ? GGUF_DEFAULT_ALIGNMENT : gguf_get_val_u32(ctx, alignment_idx);

  if (ctx->alignment == 0 || (ctx->alignment & (ctx->alignment - 1)) != 0) {
- fprintf(stderr, "%s: alignment %zu is not a power of 2\n", __func__, ctx->alignment);
+ GGML_LOG_ERROR("%s: alignment %zu is not a power of 2\n", __func__, ctx->alignment);
  gguf_free(ctx);
  return nullptr;
  }
@@ -474,14 +474,14 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par
  try {
  ok = ok && gr.read(name);
  } catch (std::length_error &) {
- fprintf(stderr, "%s: encountered length_error while reading tensor name %" PRIi64 "\n", __func__, i);
+ GGML_LOG_ERROR("%s: encountered length_error while reading tensor name %" PRIi64 "\n", __func__, i);
  ok = false;
  } catch (std::bad_alloc &) {
- fprintf(stderr, "%s: encountered bad_alloc error while reading tensor name %" PRIi64 "\n", __func__, i);
+ GGML_LOG_ERROR("%s: encountered bad_alloc error while reading tensor name %" PRIi64 "\n", __func__, i);
  ok = false;
  }
  if (name.length() >= GGML_MAX_NAME) {
- fprintf(stderr, "%s: tensor name %" PRIi64 " is too long: %zu >= %d\n", __func__, i, name.length(), GGML_MAX_NAME);
+ GGML_LOG_ERROR("%s: tensor name %" PRIi64 " is too long: %zu >= %d\n", __func__, i, name.length(), GGML_MAX_NAME);
  ok = false;
  break;
  }
@@ -490,7 +490,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par
  // make sure there are no duplicate tensor names
  for (int64_t j = 0; ok && j < i; ++j) {
  if (strcmp(info.t.name, ctx->info[j].t.name) == 0) {
- fprintf(stderr, "%s: duplicate tensor name '%s' for tensors %" PRIi64 " and %" PRIi64 "\n", __func__, info.t.name, j, i);
+ GGML_LOG_ERROR("%s: duplicate tensor name '%s' for tensors %" PRIi64 " and %" PRIi64 "\n", __func__, info.t.name, j, i);
  ok = false;
  break;
  }
@@ -505,7 +505,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par
  uint32_t n_dims = -1;
  ok = ok && gr.read(n_dims);
  if (n_dims > GGML_MAX_DIMS) {
- fprintf(stderr, "%s: tensor '%s' has invalid number of dimensions: %" PRIu32 " > %" PRIu32 "\n",
+ GGML_LOG_ERROR("%s: tensor '%s' has invalid number of dimensions: %" PRIu32 " > %" PRIu32 "\n",
  __func__, info.t.name, n_dims, GGML_MAX_DIMS);
  ok = false;
  break;
@@ -518,7 +518,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par

  // check that all ne are non-negative
  if (info.t.ne[j] < 0) {
- fprintf(stderr, "%s: tensor '%s' dimension %" PRIu32 " has invalid number of elements: %" PRIi64 " < 0\n",
+ GGML_LOG_ERROR("%s: tensor '%s' dimension %" PRIu32 " has invalid number of elements: %" PRIi64 " < 0\n",
  __func__, info.t.name, j, info.t.ne[j]);
  ok = false;
  break;
@@ -530,7 +530,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par
  (INT64_MAX/info.t.ne[2] <= info.t.ne[0]*info.t.ne[1]) ||
  (INT64_MAX/info.t.ne[3] <= info.t.ne[0]*info.t.ne[1]*info.t.ne[2]))) {

- fprintf(stderr, "%s: total number of elements in tensor '%s' with shape "
+ GGML_LOG_ERROR("%s: total number of elements in tensor '%s' with shape "
  "(%" PRIi64 ", %" PRIi64 ", %" PRIi64 ", %" PRIi64 ") is >= %" PRIi64 "\n",
  __func__, info.t.name, info.t.ne[0], info.t.ne[1], info.t.ne[2], info.t.ne[3], INT64_MAX);
  ok = false;
@@ -547,7 +547,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par

  // check that tensor type is within defined range
  if (info.t.type < 0 || info.t.type >= GGML_TYPE_COUNT) {
- fprintf(stderr, "%s: tensor '%s' has invalid ggml type %d (%s)\n",
+ GGML_LOG_ERROR("%s: tensor '%s' has invalid ggml type %d (%s)\n",
  __func__, info.t.name, info.t.type, ggml_type_name(info.t.type));
  ok = false;
  break;
@@ -557,7 +557,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par

  // check that row size is divisible by block size
  if (blck_size == 0 || info.t.ne[0] % blck_size != 0) {
- fprintf(stderr, "%s: tensor '%s' of type %d (%s) has %" PRId64 " elements per row, "
+ GGML_LOG_ERROR("%s: tensor '%s' of type %d (%s) has %" PRId64 " elements per row, "
  "not a multiple of block size (%" PRId64 ")\n",
  __func__, info.t.name, (int) info.t.type, ggml_type_name(info.t.type), info.t.ne[0], blck_size);
  ok = false;
@@ -582,7 +582,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par
  }

  if (!ok) {
- fprintf(stderr, "%s: failed to read tensor info\n", __func__);
+ GGML_LOG_ERROR("%s: failed to read tensor info\n", __func__);
  gguf_free(ctx);
  return nullptr;
  }
@@ -590,7 +590,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par

  // we require the data section to be aligned, so take into account any padding
  if (fseek(file, GGML_PAD(ftell(file), ctx->alignment), SEEK_SET) != 0) {
- fprintf(stderr, "%s: failed to seek to beginning of data section\n", __func__);
+ GGML_LOG_ERROR("%s: failed to seek to beginning of data section\n", __func__);
  gguf_free(ctx);
  return nullptr;
  }
@@ -604,9 +604,9 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par
  for (size_t i = 0; i < ctx->info.size(); ++i) {
  const gguf_tensor_info & ti = ctx->info[i];
  if (ti.offset != ctx->size) {
- fprintf(stderr, "%s: tensor '%s' has offset %" PRIu64 ", expected %zu\n",
+ GGML_LOG_ERROR("%s: tensor '%s' has offset %" PRIu64 ", expected %zu\n",
  __func__, ti.t.name, ti.offset, ctx->size);
- fprintf(stderr, "%s: failed to read tensor data\n", __func__);
+ GGML_LOG_ERROR("%s: failed to read tensor data\n", __func__);
  gguf_free(ctx);
  return nullptr;
  }
@@ -634,7 +634,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par

  *params.ctx = ggml_init(pdata);
  if (*params.ctx == nullptr) {
- fprintf(stderr, "%s: failed to initialize ggml context for storing tensors\n", __func__);
+ GGML_LOG_ERROR("%s: failed to initialize ggml context for storing tensors\n", __func__);
  gguf_free(ctx);
  return nullptr;
  }
@@ -656,7 +656,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par
  ok = ok && gr.read(data->data, ctx->size);

  if (!ok) {
- fprintf(stderr, "%s: failed to read tensor data binary blob\n", __func__);
+ GGML_LOG_ERROR("%s: failed to read tensor data binary blob\n", __func__);
  ggml_free(ctx_data);
  *params.ctx = nullptr;
  gguf_free(ctx);
@@ -689,7 +689,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par
  }

  if (!ok) {
- fprintf(stderr, "%s: failed to create tensors\n", __func__);
+ GGML_LOG_ERROR("%s: failed to create tensors\n", __func__);
  ggml_free(ctx_data);
  *params.ctx = nullptr;
  gguf_free(ctx);
@@ -706,7 +706,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p
  FILE * file = ggml_fopen(fname, "rb");

  if (!file) {
- fprintf(stderr, "%s: failed to open GGUF file '%s'\n", __func__, fname);
+ GGML_LOG_ERROR("%s: failed to open GGUF file '%s'\n", __func__, fname);
  return nullptr;
  }

@@ -1305,7 +1305,7 @@ bool gguf_write_to_file(const struct gguf_context * ctx, const char * fname, boo
  FILE * file = ggml_fopen(fname, "wb");

  if (!file) {
- fprintf(stderr, "%s: failed to open file '%s' for writing GGUF data\n", __func__, fname);
+ GGML_LOG_ERROR("%s: failed to open file '%s' for writing GGUF data\n", __func__, fname);
  return false;
  }
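Note on the gguf.cpp hunks above: every fprintf(stderr, ...) in the GGUF reader/writer now goes through GGML_LOG_ERROR, so embedders can capture or silence these messages via ggml's log callback instead of scraping stderr. A minimal sketch, assuming only the public ggml_log_set() API from ggml.h; the file names and the sink are placeholders:

    #include "ggml.h"
    #include "gguf.h"
    #include <stdio.h>

    // Route ggml/gguf error messages to a sink chosen by the host application.
    static void log_cb(enum ggml_log_level level, const char * text, void * user_data) {
        if (level == GGML_LOG_LEVEL_ERROR) {
            fputs(text, (FILE *) user_data);
        }
    }

    int main(void) {
        FILE * sink = fopen("gguf-errors.log", "w");
        ggml_log_set(log_cb, sink);

        // a failed load now reports through the callback, e.g.
        // "gguf_init_from_file_impl: failed to read magic"
        struct gguf_init_params params = { /*.no_alloc =*/ false, /*.ctx =*/ NULL };
        struct gguf_context * gguf = gguf_init_from_file("model.gguf", params);
        if (gguf) {
            gguf_free(gguf);
        }
        fclose(sink);
        return 0;
    }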
package/src/llama.cpp/include/llama.h

@@ -4,6 +4,7 @@
  #include "ggml.h"
  #include "ggml-cpu.h"
  #include "ggml-backend.h"
+ #include "ggml-opt.h"

  #include <stddef.h>
  #include <stdint.h>
@@ -112,6 +113,7 @@ extern "C" {
  LLAMA_VOCAB_PRE_TYPE_BAILINGMOE = 32,
  LLAMA_VOCAB_PRE_TYPE_LLAMA4 = 33,
  LLAMA_VOCAB_PRE_TYPE_PIXTRAL = 34,
+ LLAMA_VOCAB_PRE_TYPE_SEED_CODER = 35,
  };

  enum llama_rope_type {
@@ -343,7 +345,7 @@ extern "C" {
  float yarn_beta_fast; // YaRN low correction dim
  float yarn_beta_slow; // YaRN high correction dim
  uint32_t yarn_orig_ctx; // YaRN original context size
- float defrag_thold; // defragment the KV cache if holes/size > thold, < 0 disabled (default)
+ float defrag_thold; // defragment the KV cache if holes/size > thold, <= 0 disabled (default)

  ggml_backend_sched_eval_callback cb_eval;
  void * cb_eval_user_data;
@@ -351,19 +353,18 @@ extern "C" {
  enum ggml_type type_k; // data type for K cache [EXPERIMENTAL]
  enum ggml_type type_v; // data type for V cache [EXPERIMENTAL]

- // Keep the booleans together and at the end of the struct to avoid misalignment during copy-by-value.
- // TODO: move at the end of the struct
- bool logits_all; // the llama_decode() call computes all logits, not just the last one (DEPRECATED - set llama_batch.logits instead)
- bool embeddings; // if true, extract embeddings (together with logits)
- bool offload_kqv; // whether to offload the KQV ops (including the KV cache) to GPU
- bool flash_attn; // whether to use flash attention [EXPERIMENTAL]
- bool no_perf; // whether to measure performance timings
-
  // Abort callback
  // if it returns true, execution of llama_decode() will be aborted
  // currently works only with CPU execution
  ggml_abort_callback abort_callback;
  void * abort_callback_data;
+
+ // Keep the booleans together and at the end of the struct to avoid misalignment during copy-by-value.
+ bool embeddings; // if true, extract embeddings (together with logits)
+ bool offload_kqv; // whether to offload the KQV ops (including the KV cache) to GPU
+ bool flash_attn; // whether to use flash attention [EXPERIMENTAL]
+ bool no_perf; // whether to measure performance timings
+ bool op_offload; // whether to offload host tensor operations to device
  };

  // model quantization parameters
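Note: the hunk above drops the deprecated logits_all flag, regroups the booleans at the end of llama_context_params, and adds op_offload. A short sketch of initializing the new layout; starting from llama_context_default_params() insulates callers from the field reordering:

    struct llama_context_params cparams = llama_context_default_params();
    cparams.embeddings  = false; // logits only
    cparams.offload_kqv = true;  // keep KV-cache ops on the device
    cparams.flash_attn  = false;
    cparams.no_perf     = true;  // skip performance timings
    cparams.op_offload  = true;  // new field: offload host tensor ops to device
    // per-token logits are now requested via llama_batch.logits rather than
    // a context-wide logits_all flag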
@@ -445,6 +446,10 @@
  size_t n_paths,
  struct llama_model_params params);

+ LLAMA_API void llama_model_save_to_file(
+ const struct llama_model * model,
+ const char * path_model);
+
  DEPRECATED(LLAMA_API void llama_free_model(struct llama_model * model),
  "use llama_model_free instead");

@@ -924,14 +929,19 @@ extern "C" {
  // Frees a batch of tokens allocated with llama_batch_init()
  LLAMA_API void llama_batch_free(struct llama_batch batch);

- // Processes a batch of tokens with the ecoder part of the encoder-decoder model.
- // Stores the encoder output internally for later use by the decoder cross-attention layers.
+ // Process a batch of tokens.
+ // In contrast to llama_decode() - this call does not use KV cache.
+ // For encode-decoder contexts, processes the batch using the encoder.
+ // Can store the encoder output internally for later use by the decoder's cross-attention layers.
  // 0 - success
  // < 0 - error. the KV cache state is restored to the state before this call
  LLAMA_API int32_t llama_encode(
  struct llama_context * ctx,
  struct llama_batch batch);

+ // Process a batch of tokens.
+ // Requires KV cache.
+ // For encode-decoder contexts, processes the batch using the decoder.
  // Positive return values does not mean a fatal error, but rather a warning.
  // 0 - success
  // 1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context)
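Note: the reworded comments draw the real distinction: llama_encode() is the KV-cache-free pass, llama_decode() the cached one. A hedged sketch of the encode-then-decode flow for an encoder-decoder (e.g. T5-style) model; prompt_tokens, n_prompt, n_predict, ctx, model, and the sampler smpl are assumed to be set up as usual:

    // encoder pass: output is stored internally for decoder cross-attention
    llama_batch ebatch = llama_batch_get_one(prompt_tokens, n_prompt);
    if (llama_encode(ctx, ebatch) != 0) {
        fprintf(stderr, "encode failed\n");
        return 1;
    }

    // decoder pass: starts from the decoder start token and uses the KV cache
    llama_token tok = llama_model_decoder_start_token(model);
    for (int i = 0; i < n_predict; ++i) {
        llama_batch dbatch = llama_batch_get_one(&tok, 1);
        if (llama_decode(ctx, dbatch) != 0) {
            break; // > 0 is a warning (no KV slot), < 0 is an error
        }
        tok = llama_sampler_sample(smpl, ctx, -1);
    }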
@@ -1428,6 +1438,37 @@ extern "C" {
  LLAMA_API void llama_perf_sampler_print(const struct llama_sampler * chain);
  LLAMA_API void llama_perf_sampler_reset( struct llama_sampler * chain);

+ //
+ // training
+ //
+
+ // function that returns whether or not a given tensor contains trainable parameters
+ typedef bool (*llama_opt_param_filter)(const struct ggml_tensor * tensor, void * userdata);
+
+ // always returns true
+ LLAMA_API bool llama_opt_param_filter_all(const struct ggml_tensor * tensor, void * userdata);
+
+ struct llama_opt_params {
+ uint32_t n_ctx_train; // assumed context size post training, use context size specified in llama_context if 0
+
+ llama_opt_param_filter param_filter; // callback for determining which tensors contain trainable parameters
+ void * param_filter_ud; // userdata for determining which tensors contain trainable parameters
+
+ ggml_opt_get_optimizer_params get_opt_pars; // callback for calculating optimizer parameters
+ void * get_opt_pars_ud; // userdata for calculating optimizer parameters
+ };
+
+ LLAMA_API void llama_opt_init(struct llama_context * lctx, struct llama_model * model, struct llama_opt_params lopt_params);
+
+ LLAMA_API void llama_opt_epoch(
+ struct llama_context * lctx,
+ ggml_opt_dataset_t dataset,
+ ggml_opt_result_t result_train,
+ ggml_opt_result_t result_eval,
+ int64_t idata_split,
+ ggml_opt_epoch_callback callback_train,
+ ggml_opt_epoch_callback callback_eval);
+
  #ifdef __cplusplus
  }
  #endif
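Note: together with llama-model-saver.cpp and the new examples/training/finetune.cpp, the block above adds a first-class finetuning path. A sketch of one epoch under stated assumptions: lctx/model are an existing context/model pair, dataset is a ggml_opt_dataset_t built as in finetune.cpp, and idata_split marks the train/eval boundary:

    // train every tensor, with default optimizer parameters from ggml-opt.h
    struct llama_opt_params opt_params = {
        /*.n_ctx_train     =*/ 0, // 0: use the context size of lctx
        /*.param_filter    =*/ llama_opt_param_filter_all,
        /*.param_filter_ud =*/ NULL,
        /*.get_opt_pars    =*/ ggml_opt_get_default_optimizer_params,
        /*.get_opt_pars_ud =*/ NULL,
    };
    llama_opt_init(lctx, model, opt_params);

    ggml_opt_result_t result_train = ggml_opt_result_init();
    ggml_opt_result_t result_eval  = ggml_opt_result_init();

    // the first idata_split datapoints train, the remainder evaluate
    llama_opt_epoch(lctx, dataset, result_train, result_eval, idata_split,
                    ggml_opt_epoch_callback_progress_bar,
                    ggml_opt_epoch_callback_progress_bar);

    ggml_opt_result_free(result_train);
    ggml_opt_result_free(result_eval);

    // persist the updated weights with the new saver API declared above
    llama_model_save_to_file(model, "model-finetuned.gguf");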
package/src/llama.cpp/requirements/requirements-all.txt

@@ -1,6 +1,6 @@
- -r ../examples/llava/requirements.txt
- -r ../examples/server/bench/requirements.txt
- -r ../examples/server/tests/requirements.txt
+ -r ../tools/mtmd/requirements.txt
+ -r ../tools/server/bench/requirements.txt
+ -r ../tools/server/tests/requirements.txt

  -r ./requirements-compare-llama-bench.txt
  -r ./requirements-pydantic.txt
package/src/llama.cpp/scripts/xxd.cmake

@@ -1,5 +1,5 @@
  # CMake equivalent of `xxd -i ${INPUT} ${OUTPUT}`
- # Usage: cmake -DINPUT=examples/server/public/index.html -DOUTPUT=examples/server/index.html.hpp -P scripts/xxd.cmake
+ # Usage: cmake -DINPUT=tools/server/public/index.html -DOUTPUT=tools/server/index.html.hpp -P scripts/xxd.cmake

  SET(INPUT "" CACHE STRING "Input File")
  SET(OUTPUT "" CACHE STRING "Output File")
package/src/llama.cpp/src/CMakeLists.txt

@@ -23,6 +23,7 @@ add_library(llama
  llama-memory.cpp
  llama-mmap.cpp
  llama-model-loader.cpp
+ llama-model-saver.cpp
  llama-model.cpp
  llama-quant.cpp
  llama-sampling.cpp
package/src/llama.cpp/src/llama-adapter.cpp

@@ -253,6 +253,9 @@ static void llama_adapter_lora_init_impl(llama_model & model, const char * path_
  std::vector<ggml_backend_buffer_type_t> buft_extra;
  {
  auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
+ if (!cpu_dev) {
+ throw std::runtime_error(format("%s: no CPU backend found", __func__));
+ }
  auto * cpu_reg = ggml_backend_dev_backend_reg(cpu_dev);

  auto ggml_backend_dev_get_extra_bufts_fn = (ggml_backend_dev_get_extra_bufts_t)
@@ -291,6 +294,9 @@ static void llama_adapter_lora_init_impl(llama_model & model, const char * path_
  LLAMA_LOG_WARN("%s: lora for '%s' cannot use buft '%s', fallback to CPU\n", __func__, model_tensor->name, ggml_backend_buft_name(buft));

  auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
+ if (!cpu_dev) {
+ throw std::runtime_error(format("%s: no CPU backend found", __func__));
+ }
  buft = ggml_backend_dev_buffer_type(cpu_dev);

  break;
package/src/llama.cpp/src/llama-arch.cpp

@@ -1481,6 +1481,9 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_N
  { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
  { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
  { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
+ { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
+ { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
+ { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
  },
  },
  {
package/src/llama.cpp/src/llama-batch.cpp

@@ -189,7 +189,7 @@ llama_ubatch llama_sbatch::split_seq(size_t n_ubatch) {
  return ubatch;
  }

- void llama_sbatch::from_batch(const llama_batch & batch, size_t n_embd, bool simple_split, bool logits_all) {
+ llama_sbatch::llama_sbatch(const llama_batch & batch, size_t n_embd, bool simple_split, bool logits_all) {
  GGML_ASSERT(batch.n_tokens >= 0);
  this->batch = &batch;
  this->n_embd = n_embd;
@@ -203,6 +203,7 @@ void llama_sbatch::from_batch(const llama_batch & batch, size_t n_embd, bool sim
  for (size_t i = 0; i < n_tokens; ++i) {
  ids[i] = i;
  }
+
  if (simple_split) {
  seq.resize(1);
  llama_sbatch_seq & s = seq[0];
@@ -212,6 +213,7 @@
  s.length = n_tokens;
  return;
  }
+
  std::sort(ids.begin(), ids.end(),
  [&batch](size_t a, size_t b) {
  int32_t n_seq_a = batch.n_seq_id ? batch.n_seq_id[a] : 1;
@@ -239,6 +241,7 @@
  return n_seq_a > n_seq_b;
  }
  );
+
  // init seq
  llama_sbatch_seq * last_seq = nullptr;

@@ -262,6 +265,7 @@
  seq.push_back(new_seq);
  last_seq = &seq.back();
  }
+
  // keep shared prompts first at the end, then sort by length descending.
  std::sort(seq.begin(), seq.end(),
  [](llama_sbatch_seq & a, llama_sbatch_seq & b) {
package/src/llama.cpp/src/llama-batch.h

@@ -70,7 +70,8 @@ struct llama_sbatch {
  // sequence-wise split
  llama_ubatch split_seq(size_t n_ubatch);

- void from_batch(const llama_batch & batch, size_t n_embd, bool simple_split = false, bool logits_all = false);
+ llama_sbatch() = default;
+ llama_sbatch(const llama_batch & batch, size_t n_embd, bool simple_split = false, bool logits_all = false);
  };

  // temporary allocate memory for the input batch if needed
package/src/llama.cpp/src/llama-chat.cpp

@@ -35,6 +35,7 @@ static const std::map<std::string, llm_chat_template> LLM_CHAT_TEMPLATES = {
  { "mistral-v3", LLM_CHAT_TEMPLATE_MISTRAL_V3 },
  { "mistral-v3-tekken", LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN },
  { "mistral-v7", LLM_CHAT_TEMPLATE_MISTRAL_V7 },
+ { "mistral-v7-tekken", LLM_CHAT_TEMPLATE_MISTRAL_V7_TEKKEN },
  { "phi3", LLM_CHAT_TEMPLATE_PHI_3 },
  { "phi4", LLM_CHAT_TEMPLATE_PHI_4 },
  { "falcon3", LLM_CHAT_TEMPLATE_FALCON_3 },
@@ -202,19 +203,20 @@ int32_t llm_chat_apply_template(
  if (add_ass) {
  ss << "<|im_start|>assistant\n";
  }
- } else if (tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V7) {
+ } else if (tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V7 || tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V7_TEKKEN) {
  // Official mistral 'v7' template
  // See: https://huggingface.co/mistralai/Mistral-Large-Instruct-2411#basic-instruct-template-v7
+ // https://huggingface.co/mistralai/Mistral-Small-3.1-24B-Instruct-2503#basic-instruct-template-v7-tekken
+ const char * trailing_space = tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V7 ? " " : "";
  for (auto message : chat) {
  std::string role(message->role);
  std::string content(message->content);
  if (role == "system") {
- ss << "[SYSTEM_PROMPT] " << content << "[/SYSTEM_PROMPT]";
+ ss << "[SYSTEM_PROMPT]" << trailing_space << content << "[/SYSTEM_PROMPT]";
  } else if (role == "user") {
- ss << "[INST] " << content << "[/INST]";
- }
- else {
- ss << " " << content << "</s>";
+ ss << "[INST]" << trailing_space << content << "[/INST]";
+ } else {
+ ss << trailing_space << content << "</s>";
  }
  }
  } else if (tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V1
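Note: the only behavioral difference between v7 and v7-tekken is the space after the opening tags, as the trailing_space variable above makes explicit. A hedged sketch of the resulting render via the public llama_chat_apply_template(); the two-message conversation is hypothetical:

    llama_chat_message msgs[] = {
        { /*.role =*/ "system", /*.content =*/ "You are helpful." },
        { /*.role =*/ "user",   /*.content =*/ "Hi" },
    };
    char buf[256];
    int32_t n = llama_chat_apply_template("mistral-v7-tekken", msgs, 2,
                                          /*add_ass=*/ false, buf, sizeof(buf));
    if (n > 0) {
        // mistral-v7:        [SYSTEM_PROMPT] You are helpful.[/SYSTEM_PROMPT][INST] Hi[/INST]
        // mistral-v7-tekken: [SYSTEM_PROMPT]You are helpful.[/SYSTEM_PROMPT][INST]Hi[/INST]
        printf("%.*s\n", n, buf);
    }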
@@ -447,8 +449,16 @@
  if (add_ass) {
  ss << "<|assistant|>";
  }
- } else if (tmpl == LLM_CHAT_TEMPLATE_CHATGLM_4 || tmpl == LLM_CHAT_TEMPLATE_GLMEDGE) {
+ } else if (tmpl == LLM_CHAT_TEMPLATE_CHATGLM_4) {
  ss << "[gMASK]" << "<sop>";
+ for (auto message : chat) {
+ std::string role(message->role);
+ ss << "<|" << role << "|>" << "\n" << message->content;
+ }
+ if (add_ass) {
+ ss << "<|assistant|>\n";
+ }
+ } else if (tmpl == LLM_CHAT_TEMPLATE_GLMEDGE) {
  for (auto message : chat) {
  std::string role(message->role);
  ss << "<|" << role << "|>" << "\n" << message->content;
package/src/llama.cpp/src/llama-chat.h

@@ -14,6 +14,7 @@ enum llm_chat_template {
  LLM_CHAT_TEMPLATE_MISTRAL_V3,
  LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN,
  LLM_CHAT_TEMPLATE_MISTRAL_V7,
+ LLM_CHAT_TEMPLATE_MISTRAL_V7_TEKKEN,
  LLM_CHAT_TEMPLATE_PHI_3,
  LLM_CHAT_TEMPLATE_PHI_4,
  LLM_CHAT_TEMPLATE_FALCON_3,