cui-llama.rn 1.6.1 → 1.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (196)
  1. package/android/src/main/CMakeLists.txt +6 -0
  2. package/android/src/main/java/com/rnllama/LlamaContext.java +38 -5
  3. package/android/src/main/java/com/rnllama/RNLlama.java +139 -4
  4. package/android/src/main/jni.cpp +153 -14
  5. package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
  6. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
  7. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
  8. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
  9. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
  10. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
  11. package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
  12. package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
  13. package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +24 -4
  14. package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +22 -2
  15. package/cpp/chat.cpp +128 -106
  16. package/cpp/chat.h +2 -0
  17. package/cpp/common.cpp +41 -76
  18. package/cpp/common.h +23 -19
  19. package/cpp/ggml-backend.cpp +9 -5
  20. package/cpp/ggml-backend.h +4 -4
  21. package/cpp/ggml-cpu/ggml-cpu-aarch64.cpp +0 -2
  22. package/cpp/ggml-cpu/ggml-cpu-quants.c +306 -6
  23. package/cpp/ggml-cpu/ggml-cpu.c +5 -13
  24. package/cpp/ggml-cpu/ggml-cpu.cpp +29 -16
  25. package/cpp/ggml-cpu/ops.cpp +107 -13
  26. package/cpp/ggml-cpu/vec.cpp +0 -6
  27. package/cpp/ggml-cpu/vec.h +16 -0
  28. package/cpp/ggml-llama-sim.metallib +0 -0
  29. package/cpp/ggml-llama.metallib +0 -0
  30. package/cpp/ggml-metal-impl.h +36 -11
  31. package/cpp/ggml-metal.m +321 -132
  32. package/cpp/ggml-opt.cpp +373 -190
  33. package/cpp/ggml-opt.h +49 -28
  34. package/cpp/ggml-quants.c +0 -6
  35. package/cpp/ggml.c +93 -38
  36. package/cpp/ggml.h +21 -7
  37. package/cpp/gguf.cpp +33 -33
  38. package/cpp/llama-adapter.cpp +6 -0
  39. package/cpp/llama-arch.cpp +3 -0
  40. package/cpp/llama-batch.cpp +3 -1
  41. package/cpp/llama-chat.cpp +8 -6
  42. package/cpp/llama-chat.h +1 -0
  43. package/cpp/llama-context.cpp +349 -135
  44. package/cpp/llama-context.h +30 -3
  45. package/cpp/llama-cparams.h +1 -0
  46. package/cpp/llama-graph.cpp +150 -234
  47. package/cpp/llama-graph.h +52 -7
  48. package/cpp/llama-hparams.cpp +17 -1
  49. package/cpp/llama-hparams.h +34 -5
  50. package/cpp/llama-kv-cache.cpp +662 -321
  51. package/cpp/llama-kv-cache.h +203 -93
  52. package/cpp/llama-memory.h +3 -2
  53. package/cpp/llama-model-loader.cpp +24 -15
  54. package/cpp/llama-model-saver.cpp +281 -0
  55. package/cpp/llama-model-saver.h +37 -0
  56. package/cpp/llama-model.cpp +536 -132
  57. package/cpp/llama-model.h +7 -1
  58. package/cpp/llama-sampling.cpp +18 -6
  59. package/cpp/llama-vocab.cpp +46 -8
  60. package/cpp/llama-vocab.h +6 -0
  61. package/cpp/llama.cpp +14 -0
  62. package/cpp/llama.h +72 -131
  63. package/cpp/minja/chat-template.hpp +9 -5
  64. package/cpp/minja/minja.hpp +69 -36
  65. package/cpp/rn-llama.cpp +611 -47
  66. package/cpp/rn-llama.h +33 -3
  67. package/cpp/sampling.cpp +57 -50
  68. package/cpp/tools/mtmd/clip-impl.h +462 -0
  69. package/cpp/tools/mtmd/clip.cpp +4024 -0
  70. package/cpp/tools/mtmd/clip.h +101 -0
  71. package/cpp/tools/mtmd/miniaudio.h +93468 -0
  72. package/cpp/tools/mtmd/mtmd-audio.cpp +855 -0
  73. package/cpp/tools/mtmd/mtmd-audio.h +62 -0
  74. package/cpp/tools/mtmd/mtmd-helper.cpp +297 -0
  75. package/cpp/tools/mtmd/mtmd.cpp +942 -0
  76. package/cpp/tools/mtmd/mtmd.h +362 -0
  77. package/cpp/tools/mtmd/stb_image.h +7988 -0
  78. package/ios/CMakeLists.txt +7 -0
  79. package/ios/RNLlama.mm +77 -3
  80. package/ios/RNLlamaContext.h +5 -1
  81. package/ios/RNLlamaContext.mm +105 -10
  82. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/chat.h +2 -0
  83. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/common.h +23 -19
  84. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-backend.h +4 -4
  85. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
  86. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-opt.h +49 -28
  87. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml.h +21 -7
  88. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-chat.h +1 -0
  89. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-context.h +30 -3
  90. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-cparams.h +1 -0
  91. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-graph.h +52 -7
  92. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-hparams.h +34 -5
  93. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-kv-cache.h +203 -93
  94. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-memory.h +3 -2
  95. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model-saver.h +37 -0
  96. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model.h +7 -1
  97. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-vocab.h +6 -0
  98. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama.h +72 -131
  99. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
  100. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/minja.hpp +69 -36
  101. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/rn-llama.h +33 -3
  102. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Info.plist +0 -0
  103. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/ggml-llama.metallib +0 -0
  104. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/rnllama +0 -0
  105. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +2 -0
  106. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +23 -19
  107. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +4 -4
  108. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
  109. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +49 -28
  110. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +21 -7
  111. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +1 -0
  112. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +30 -3
  113. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +1 -0
  114. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +52 -7
  115. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +34 -5
  116. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +203 -93
  117. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +3 -2
  118. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-saver.h +37 -0
  119. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +7 -1
  120. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +6 -0
  121. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +72 -131
  122. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
  123. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +69 -36
  124. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +33 -3
  125. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
  126. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +1 -1
  127. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
  128. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
  129. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/chat.h +2 -0
  130. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/common.h +23 -19
  131. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-backend.h +4 -4
  132. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
  133. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-opt.h +49 -28
  134. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml.h +21 -7
  135. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-chat.h +1 -0
  136. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-context.h +30 -3
  137. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-cparams.h +1 -0
  138. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-graph.h +52 -7
  139. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-hparams.h +34 -5
  140. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-kv-cache.h +203 -93
  141. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-memory.h +3 -2
  142. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model-saver.h +37 -0
  143. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model.h +7 -1
  144. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-vocab.h +6 -0
  145. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama.h +72 -131
  146. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
  147. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/minja.hpp +69 -36
  148. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/rn-llama.h +33 -3
  149. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Info.plist +0 -0
  150. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/ggml-llama.metallib +0 -0
  151. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/rnllama +0 -0
  152. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +2 -0
  153. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +23 -19
  154. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +4 -4
  155. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
  156. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +49 -28
  157. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +21 -7
  158. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +1 -0
  159. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +30 -3
  160. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +1 -0
  161. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +52 -7
  162. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +34 -5
  163. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +203 -93
  164. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +3 -2
  165. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-saver.h +37 -0
  166. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +7 -1
  167. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +6 -0
  168. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +72 -131
  169. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
  170. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +69 -36
  171. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +33 -3
  172. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
  173. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +1 -1
  174. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
  175. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
  176. package/jest/mock.js +33 -7
  177. package/lib/commonjs/NativeRNLlama.js.map +1 -1
  178. package/lib/commonjs/index.js +153 -21
  179. package/lib/commonjs/index.js.map +1 -1
  180. package/lib/module/NativeRNLlama.js.map +1 -1
  181. package/lib/module/index.js +152 -20
  182. package/lib/module/index.js.map +1 -1
  183. package/lib/typescript/NativeRNLlama.d.ts +50 -4
  184. package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
  185. package/lib/typescript/index.d.ts +72 -6
  186. package/lib/typescript/index.d.ts.map +1 -1
  187. package/package.json +1 -1
  188. package/src/NativeRNLlama.ts +67 -4
  189. package/src/index.ts +212 -38
  190. package/lib/commonjs/chat.js +0 -37
  191. package/lib/commonjs/chat.js.map +0 -1
  192. package/lib/module/chat.js +0 -33
  193. package/lib/module/chat.js.map +0 -1
  194. package/lib/typescript/chat.d.ts +0 -10
  195. package/lib/typescript/chat.d.ts.map +0 -1
  196. package/src/chat.ts +0 -44
@@ -0,0 +1,37 @@
+ #pragma once
+
+ #include "llama.h"
+ #include "llama-arch.h"
+
+ #include <vector>
+
+ struct llama_model_saver {
+ struct lm_gguf_context * lm_gguf_ctx = nullptr;
+ const struct llama_model & model;
+ const struct LLM_KV llm_kv;
+
+ llama_model_saver(const struct llama_model & model);
+ ~llama_model_saver();
+
+ void add_kv(enum llm_kv key, uint32_t value);
+ void add_kv(enum llm_kv key, int32_t value);
+ void add_kv(enum llm_kv key, float value);
+ void add_kv(enum llm_kv key, bool value);
+ void add_kv(enum llm_kv key, const char * value);
+
+ [[noreturn]]
+ void add_kv(enum llm_kv key, char value); // needed to make the template below compile
+
+ template <typename Container>
+ void add_kv(enum llm_kv key, const Container & value, bool per_layer = false);
+
+ void add_kv(enum llm_kv key, const std::vector<std::string> & value);
+
+ void add_tensor(const struct lm_ggml_tensor * tensor);
+
+ void add_kv_from_model();
+
+ void add_tensors_from_model();
+
+ void save(const std::string & path_model);
+ };
@@ -76,6 +76,7 @@ enum llm_type {
  LLM_TYPE_236B,
  LLM_TYPE_290B,
  LLM_TYPE_314B,
+ LLM_TYPE_405B,
  LLM_TYPE_671B,
  LLM_TYPE_SMALL,
  LLM_TYPE_MEDIUM,
@@ -95,6 +96,8 @@ enum llm_type {
  LLM_TYPE_235B_A22B,
  };

+ std::string llama_rope_scaling_type_name(llama_rope_scaling_type rope_scaling_type);
+
  struct llama_layer_posnet {
  // resnet
  struct lm_ggml_tensor * norm1 = nullptr;
@@ -395,7 +398,10 @@ struct llama_model {

  const struct lm_ggml_tensor * get_tensor(const char * name) const;

- lm_ggml_tensor * get_rope_factors(uint32_t n_ctx_per_seq, int il) const;
+ float get_rope_freq_base (const llama_cparams & cparams, int il) const;
+ float get_rope_freq_scale(const llama_cparams & cparams, int il) const;
+
+ lm_ggml_tensor * get_rope_factors(const llama_cparams & cparams, int il) const;

  // note: can mutate `cparams`
  // TODO: move this to new llm_arch_model_i interface
@@ -21,6 +21,9 @@ struct llama_vocab {

  void load(llama_model_loader & ml, const LLM_KV & kv);

+ std::string get_tokenizer_model() const;
+ std::string get_tokenizer_pre() const;
+
  enum llama_vocab_type get_type() const;
  enum llama_vocab_pre_type get_pre_type() const;

@@ -80,6 +83,9 @@
  int max_token_len() const;

  int find_bpe_rank(const std::string & token_left, const std::string & token_right) const;
+ std::vector<std::string> get_bpe_merges() const;
+
+ std::vector<char> get_precompiled_charsmap() const;

  int32_t tokenize(
  const char * text,
@@ -4,6 +4,7 @@
  #include "ggml.h"
  #include "ggml-cpu.h"
  #include "ggml-backend.h"
+ #include "ggml-opt.h"

  #include <stddef.h>
  #include <stdint.h>
@@ -113,6 +114,7 @@ extern "C" {
113
114
  LLAMA_VOCAB_PRE_TYPE_BAILINGMOE = 32,
114
115
  LLAMA_VOCAB_PRE_TYPE_LLAMA4 = 33,
115
116
  LLAMA_VOCAB_PRE_TYPE_PIXTRAL = 34,
117
+ LLAMA_VOCAB_PRE_TYPE_SEED_CODER = 35,
116
118
  };
117
119
 
118
120
  enum llama_rope_type {
@@ -344,7 +346,7 @@ extern "C" {
344
346
  float yarn_beta_fast; // YaRN low correction dim
345
347
  float yarn_beta_slow; // YaRN high correction dim
346
348
  uint32_t yarn_orig_ctx; // YaRN original context size
347
- float defrag_thold; // defragment the KV cache if holes/size > thold, < 0 disabled (default)
349
+ float defrag_thold; // defragment the KV cache if holes/size > thold, <= 0 disabled (default)
348
350
 
349
351
  lm_ggml_backend_sched_eval_callback cb_eval;
350
352
  void * cb_eval_user_data;
@@ -352,19 +354,19 @@ extern "C" {
352
354
  enum lm_ggml_type type_k; // data type for K cache [EXPERIMENTAL]
353
355
  enum lm_ggml_type type_v; // data type for V cache [EXPERIMENTAL]
354
356
 
355
- // Keep the booleans together and at the end of the struct to avoid misalignment during copy-by-value.
356
- // TODO: move at the end of the struct
357
- bool logits_all; // the llama_decode() call computes all logits, not just the last one (DEPRECATED - set llama_batch.logits instead)
358
- bool embeddings; // if true, extract embeddings (together with logits)
359
- bool offload_kqv; // whether to offload the KQV ops (including the KV cache) to GPU
360
- bool flash_attn; // whether to use flash attention [EXPERIMENTAL]
361
- bool no_perf; // whether to measure performance timings
362
-
363
357
  // Abort callback
364
358
  // if it returns true, execution of llama_decode() will be aborted
365
359
  // currently works only with CPU execution
366
360
  lm_ggml_abort_callback abort_callback;
367
361
  void * abort_callback_data;
362
+
363
+ // Keep the booleans together and at the end of the struct to avoid misalignment during copy-by-value.
364
+ bool embeddings; // if true, extract embeddings (together with logits)
365
+ bool offload_kqv; // offload the KQV ops (including the KV cache) to GPU
366
+ bool flash_attn; // use flash attention [EXPERIMENTAL]
367
+ bool no_perf; // measure performance timings
368
+ bool op_offload; // offload host tensor operations to device
369
+ bool swa_full; // use full-size SWA cache (https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055)
368
370
  };
369
371
 
370
372
  // model quantization parameters
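Besides moving the booleans to the end of llama_context_params (and dropping logits_all), this hunk adds two new flags, op_offload and swa_full. A minimal sketch of how a caller might set them, assuming an otherwise default parameter set; the values below are illustrative, not taken from this package:

// Hedged sketch: fill llama_context_params with the new 1.7.0 flags.
#include "llama.h"

static struct llama_context_params make_ctx_params(void) {
    struct llama_context_params cparams = llama_context_default_params();
    cparams.n_ctx      = 4096;   // placeholder context size
    cparams.flash_attn = true;   // [EXPERIMENTAL] per the header comment
    cparams.op_offload = true;   // new: offload host tensor ops to the device
    cparams.swa_full   = false;  // new: allow the smaller sliding-window-attention cache
    return cparams;
}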
@@ -446,6 +448,10 @@ extern "C" {
446
448
  size_t n_paths,
447
449
  struct llama_model_params params);
448
450
 
451
+ LLAMA_API void llama_model_save_to_file(
452
+ const struct llama_model * model,
453
+ const char * path_model);
454
+
449
455
  DEPRECATED(LLAMA_API void llama_free_model(struct llama_model * model),
450
456
  "use llama_model_free instead");
451
457
 
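llama_model_save_to_file() is the public entry point for the new llama-model-saver component listed above. A hedged sketch of loading a model and writing it back out as GGUF; the paths and helper name are placeholders:

// Hedged sketch: round-trip a model through the new save API.
#include "llama.h"
#include <stddef.h>

int save_copy(const char * src_gguf, const char * dst_gguf) {
    struct llama_model_params mparams = llama_model_default_params();
    struct llama_model * model = llama_model_load_from_file(src_gguf, mparams);
    if (model == NULL) {
        return 1; // load failed
    }
    llama_model_save_to_file(model, dst_gguf); // new in this release
    llama_model_free(model);
    return 0;
}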
@@ -603,71 +609,14 @@ extern "C" {
603
609
  // KV cache
604
610
  //
605
611
 
606
- // TODO: start using struct llama_kv_cache
607
-
608
- // Information associated with an individual cell in the KV cache view.
609
- struct llama_kv_cache_view_cell {
610
- // The position for this cell. Takes KV cache shifts into account.
611
- // May be negative if the cell is not populated.
612
- llama_pos pos;
613
- };
614
-
615
- // An updateable view of the KV cache.
616
- struct llama_kv_cache_view {
617
- // Number of KV cache cells. This will be the same as the context size.
618
- int32_t n_cells;
619
-
620
- // Maximum number of sequences that can exist in a cell. It's not an error
621
- // if there are more sequences in a cell than this value, however they will
622
- // not be visible in the view cells_sequences.
623
- int32_t n_seq_max;
624
-
625
- // Number of tokens in the cache. For example, if there are two populated
626
- // cells, the first with 1 sequence id in it and the second with 2 sequence
627
- // ids then you'll have 3 tokens.
628
- int32_t token_count;
629
-
630
- // Number of populated cache cells.
631
- int32_t used_cells;
632
-
633
- // Maximum contiguous empty slots in the cache.
634
- int32_t max_contiguous;
635
-
636
- // Index to the start of the max_contiguous slot range. Can be negative
637
- // when cache is full.
638
- int32_t max_contiguous_idx;
639
-
640
- // Information for an individual cell.
641
- struct llama_kv_cache_view_cell * cells;
642
-
643
- // The sequences for each cell. There will be n_seq_max items per cell.
644
- llama_seq_id * cells_sequences;
645
- };
646
-
647
- // Create an empty KV cache view. (use only for debugging purposes)
648
- LLAMA_API struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_context * ctx, int32_t n_seq_max);
649
-
650
- // Free a KV cache view. (use only for debugging purposes)
651
- LLAMA_API void llama_kv_cache_view_free(struct llama_kv_cache_view * view);
652
-
653
- // Update the KV cache view structure with the current state of the KV cache. (use only for debugging purposes)
654
- // TODO: change signature to llama_kv_cache_view_update(struct llama_kv_cache_view * view, const struct llama_context * ctx)
655
- LLAMA_API void llama_kv_cache_view_update(const struct llama_context * ctx, struct llama_kv_cache_view * view);
656
-
657
- ///
658
-
659
612
  // Returns the number of tokens in the KV cache (slow, use only for debug)
660
613
  // If a KV cell has multiple sequences assigned to it, it will be counted multiple times
661
- LLAMA_API int32_t llama_kv_self_n_tokens(const struct llama_context * ctx);
662
-
663
- DEPRECATED(LLAMA_API int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx),
664
- "use llama_kv_self_n_tokens instead");
614
+ DEPRECATED(LLAMA_API int32_t llama_kv_self_n_tokens(const struct llama_context * ctx),
615
+ "Use llama_kv_self_seq_pos_max() instead");
665
616
 
666
617
  // Returns the number of used KV cells (i.e. have at least one sequence assigned to them)
667
- LLAMA_API int32_t llama_kv_self_used_cells(const struct llama_context * ctx);
668
-
669
- DEPRECATED(LLAMA_API int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx),
670
- "use llama_kv_self_used_cells instead");
618
+ DEPRECATED(LLAMA_API int32_t llama_kv_self_used_cells(const struct llama_context * ctx),
619
+ "Use llama_kv_self_seq_pos_max() instead");
671
620
 
672
621
  // Clear the KV cache - both cell info is erased and KV data is zeroed
673
622
  LLAMA_API void llama_kv_self_clear(
@@ -726,10 +675,18 @@ extern "C" {
726
675
  llama_pos p1,
727
676
  int d);
728
677
 
678
+ // Returns the smallest position present in the KV cache for the specified sequence
679
+ // This is typically non-zero only for SWA caches
680
+ // Return -1 if the sequence is empty
681
+ LLAMA_API llama_pos llama_kv_self_seq_pos_min(
682
+ struct llama_context * ctx,
683
+ llama_seq_id seq_id);
684
+
729
685
  // Returns the largest position present in the KV cache for the specified sequence
686
+ // Return -1 if the sequence is empty
730
687
  LLAMA_API llama_pos llama_kv_self_seq_pos_max(
731
688
  struct llama_context * ctx,
732
- llama_seq_id seq_id);
689
+ llama_seq_id seq_id);
733
690
 
734
691
  // Defragment the KV cache
735
692
  // This will be applied:
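With llama_kv_self_n_tokens() and llama_kv_self_used_cells() deprecated above, per-sequence bookkeeping moves to the position queries. A short sketch of reporting what is still resident for one sequence, assuming an already created context:

// Hedged sketch: query the resident position range for a sequence.
#include "llama.h"
#include <stdio.h>

void print_seq_range(struct llama_context * ctx, llama_seq_id seq_id) {
    llama_pos pos_min = llama_kv_self_seq_pos_min(ctx, seq_id); // new in this release
    llama_pos pos_max = llama_kv_self_seq_pos_max(ctx, seq_id);
    if (pos_max < 0) {
        printf("seq %d: empty\n", (int) seq_id);
        return;
    }
    // pos_min > 0 usually means an SWA cache has already dropped the prefix
    printf("seq %d: positions [%d, %d]\n", (int) seq_id, (int) pos_min, (int) pos_max);
}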
@@ -743,61 +700,6 @@ extern "C" {
743
700
  // Apply the KV cache updates (such as K-shifts, defragmentation, etc.)
744
701
  LLAMA_API void llama_kv_self_update(struct llama_context * ctx);
745
702
 
746
- DEPRECATED(LLAMA_API void llama_kv_cache_clear(
747
- struct llama_context * ctx),
748
- "use llama_kv_self_clear instead");
749
-
750
- DEPRECATED(LLAMA_API bool llama_kv_cache_seq_rm(
751
- struct llama_context * ctx,
752
- llama_seq_id seq_id,
753
- llama_pos p0,
754
- llama_pos p1),
755
- "use llama_kv_self_seq_rm instead");
756
-
757
- DEPRECATED(LLAMA_API void llama_kv_cache_seq_cp(
758
- struct llama_context * ctx,
759
- llama_seq_id seq_id_src,
760
- llama_seq_id seq_id_dst,
761
- llama_pos p0,
762
- llama_pos p1),
763
- "use llama_kv_self_seq_cp instead");
764
-
765
- DEPRECATED(LLAMA_API void llama_kv_cache_seq_keep(
766
- struct llama_context * ctx,
767
- llama_seq_id seq_id),
768
- "use llama_kv_self_seq_keep instead");
769
-
770
- DEPRECATED(LLAMA_API void llama_kv_cache_seq_add(
771
- struct llama_context * ctx,
772
- llama_seq_id seq_id,
773
- llama_pos p0,
774
- llama_pos p1,
775
- llama_pos delta),
776
- "use llama_kv_self_seq_add instead");
777
-
778
- DEPRECATED(LLAMA_API void llama_kv_cache_seq_div(
779
- struct llama_context * ctx,
780
- llama_seq_id seq_id,
781
- llama_pos p0,
782
- llama_pos p1,
783
- int d),
784
- "use llama_kv_self_seq_div instead");
785
-
786
- DEPRECATED(LLAMA_API llama_pos llama_kv_cache_seq_pos_max(
787
- struct llama_context * ctx,
788
- llama_seq_id seq_id),
789
- "use llama_kv_self_seq_pos_max instead");
790
-
791
- DEPRECATED(LLAMA_API void llama_kv_cache_defrag(struct llama_context * ctx),
792
- "use llama_kv_self_defrag instead");
793
-
794
- DEPRECATED(LLAMA_API bool llama_kv_cache_can_shift(const struct llama_context * ctx),
795
- "use llama_kv_self_can_shift instead");
796
-
797
- DEPRECATED(LLAMA_API void llama_kv_cache_update(struct llama_context * ctx),
798
- "use llama_kv_self_update instead");
799
-
800
-
801
703
  //
802
704
  // State / sessions
803
705
  //
@@ -925,18 +827,26 @@ extern "C" {
925
827
  // Frees a batch of tokens allocated with llama_batch_init()
926
828
  LLAMA_API void llama_batch_free(struct llama_batch batch);
927
829
 
928
- // Processes a batch of tokens with the ecoder part of the encoder-decoder model.
929
- // Stores the encoder output internally for later use by the decoder cross-attention layers.
830
+ // Process a batch of tokens.
831
+ // In contrast to llama_decode() - this call does not use KV cache.
832
+ // For encode-decoder contexts, processes the batch using the encoder.
833
+ // Can store the encoder output internally for later use by the decoder's cross-attention layers.
930
834
  // 0 - success
931
835
  // < 0 - error. the KV cache state is restored to the state before this call
932
836
  LLAMA_API int32_t llama_encode(
933
837
  struct llama_context * ctx,
934
838
  struct llama_batch batch);
935
839
 
840
+ // Process a batch of tokens.
841
+ // Requires KV cache.
842
+ // For encode-decoder contexts, processes the batch using the decoder.
936
843
  // Positive return values does not mean a fatal error, but rather a warning.
937
- // 0 - success
938
- // 1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context)
939
- // < 0 - error. the KV cache state is restored to the state before this call
844
+ // Upon non-zero return values, the KV cache state is restored to the state before this call
845
+ // 0 - success
846
+ // 1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context)
847
+ // 2 - aborted
848
+ // -1 - invalid input batch
849
+ // < -1 - error
940
850
  LLAMA_API int32_t llama_decode(
941
851
  struct llama_context * ctx,
942
852
  struct llama_batch batch);
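The expanded llama_decode() contract separates recoverable outcomes from fatal ones. A hedged sketch of a caller acting on the codes documented above (batch construction omitted):

// Hedged sketch: react to the documented llama_decode() return codes.
#include "llama.h"

int decode_checked(struct llama_context * ctx, struct llama_batch batch) {
    int32_t ret = llama_decode(ctx, batch);
    if (ret == 0)  return 0;  // success
    if (ret == 1)  return 1;  // no KV slot: shrink the batch or grow the context, then retry
    if (ret == 2)  return 2;  // aborted via the abort_callback
    return -1;                // ret == -1 invalid batch, ret < -1 other error
}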
@@ -1429,6 +1339,37 @@ extern "C" {
1429
1339
  LLAMA_API void llama_perf_sampler_print(const struct llama_sampler * chain);
1430
1340
  LLAMA_API void llama_perf_sampler_reset( struct llama_sampler * chain);
1431
1341
 
1342
+ //
1343
+ // training
1344
+ //
1345
+
1346
+ // function that returns whether or not a given tensor contains trainable parameters
1347
+ typedef bool (*llama_opt_param_filter)(const struct lm_ggml_tensor * tensor, void * userdata);
1348
+
1349
+ // always returns true
1350
+ LLAMA_API bool llama_opt_param_filter_all(const struct lm_ggml_tensor * tensor, void * userdata);
1351
+
1352
+ struct llama_opt_params {
1353
+ uint32_t n_ctx_train; // assumed context size post training, use context size specified in llama_context if 0
1354
+
1355
+ llama_opt_param_filter param_filter; // callback for determining which tensors contain trainable parameters
1356
+ void * param_filter_ud; // userdata for determining which tensors contain trainable parameters
1357
+
1358
+ lm_ggml_opt_get_optimizer_params get_opt_pars; // callback for calculating optimizer parameters
1359
+ void * get_opt_pars_ud; // userdata for calculating optimizer parameters
1360
+ };
1361
+
1362
+ LLAMA_API void llama_opt_init(struct llama_context * lctx, struct llama_model * model, struct llama_opt_params lopt_params);
1363
+
1364
+ LLAMA_API void llama_opt_epoch(
1365
+ struct llama_context * lctx,
1366
+ lm_ggml_opt_dataset_t dataset,
1367
+ lm_ggml_opt_result_t result_train,
1368
+ lm_ggml_opt_result_t result_eval,
1369
+ int64_t idata_split,
1370
+ lm_ggml_opt_epoch_callback callback_train,
1371
+ lm_ggml_opt_epoch_callback callback_eval);
1372
+
1432
1373
  #ifdef __cplusplus
1433
1374
  }
1434
1375
  #endif
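The new training block exposes ggml-opt based fine-tuning through the C API. A hedged sketch of enabling it for an existing context/model pair; the default-optimizer helper named below comes from ggml-opt.h and its lm_-prefixed spelling is an assumption, not part of this hunk:

// Hedged sketch: opt a context into training with permissive defaults.
#include "llama.h"

void init_training(struct llama_context * lctx, struct llama_model * model) {
    struct llama_opt_params opt_params;
    opt_params.n_ctx_train     = 0;                          // 0: reuse the context's n_ctx
    opt_params.param_filter    = llama_opt_param_filter_all; // mark every tensor trainable
    opt_params.param_filter_ud = NULL;
    opt_params.get_opt_pars    = lm_ggml_opt_get_default_optimizer_params; // assumed ggml-opt.h helper
    opt_params.get_opt_pars_ud = NULL;

    llama_opt_init(lctx, model, opt_params);
    // One pass is then: llama_opt_epoch(lctx, dataset, result_train, result_eval,
    //                                   idata_split, callback_train, callback_eval);
}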
@@ -13,10 +13,12 @@
  #include <chrono>
  #include <cstddef>
  #include <cstdio>
+ #include <ctime>
  #include <exception>
  #include <iomanip>
  #include <memory>
  #include <sstream>
+ #include <stdexcept>
  #include <string>
  #include <vector>

@@ -393,8 +395,8 @@ class chat_template {

  for (const auto & message_ : adjusted_messages) {
  auto message = message_;
- if (!message.contains("role") || !message.contains("content")) {
- throw std::runtime_error("message must have 'role' and 'content' fields: " + message.dump());
+ if (!message.contains("role") || (!message.contains("content") && !message.contains("tool_calls"))) {
+ throw std::runtime_error("message must have 'role' and one of 'content' or 'tool_calls' fields: " + message.dump());
  }
  std::string role = message.at("role");

@@ -415,7 +417,6 @@ class chat_template {
  }
  }
  if (polyfill_tool_calls) {
- auto content = message.at("content");
  auto tool_calls = json::array();
  for (const auto & tool_call : message.at("tool_calls")) {
  if (tool_call.at("type") != "function") {
@@ -434,8 +435,11 @@ class chat_template {
  auto obj = json {
  {"tool_calls", tool_calls},
  };
- if (!content.is_null() && !content.empty()) {
- obj["content"] = content;
+ if (message.contains("content")) {
+ auto content = message.at("content");
+ if (!content.is_null() && !content.empty()) {
+ obj["content"] = content;
+ }
  }
  message["content"] = obj.dump(2);
  message.erase("tool_calls");
@@ -11,6 +11,7 @@
  #include <algorithm>
  #include <cctype>
  #include <cstddef>
+ #include <cstdint>
  #include <cmath>
  #include <exception>
  #include <functional>
@@ -233,7 +234,7 @@ public:
  }
  } else if (is_object()) {
  if (!index.is_hashable())
- throw std::runtime_error("Unashable type: " + index.dump());
+ throw std::runtime_error("Unhashable type: " + index.dump());
  auto it = object_->find(index.primitive_);
  if (it == object_->end())
  throw std::runtime_error("Key not found: " + index.dump());
@@ -252,7 +253,7 @@ public:
  auto index = key.get<int>();
  return array_->at(index < 0 ? array_->size() + index : index);
  } else if (object_) {
- if (!key.is_hashable()) throw std::runtime_error("Unashable type: " + dump());
+ if (!key.is_hashable()) throw std::runtime_error("Unhashable type: " + dump());
  auto it = object_->find(key.primitive_);
  if (it == object_->end()) return Value();
  return it->second;
@@ -261,7 +262,7 @@ public:
  }
  void set(const Value& key, const Value& value) {
  if (!object_) throw std::runtime_error("Value is not an object: " + dump());
- if (!key.is_hashable()) throw std::runtime_error("Unashable type: " + dump());
+ if (!key.is_hashable()) throw std::runtime_error("Unhashable type: " + dump());
  (*object_)[key.primitive_] = value;
  }
  Value call(const std::shared_ptr<Context> & context, ArgumentsValue & args) const {
@@ -398,7 +399,7 @@ public:
  }
  return false;
  } else if (object_) {
- if (!value.is_hashable()) throw std::runtime_error("Unashable type: " + value.dump());
+ if (!value.is_hashable()) throw std::runtime_error("Unhashable type: " + value.dump());
  return object_->find(value.primitive_) != object_->end();
  } else {
  throw std::runtime_error("contains can only be called on arrays and objects: " + dump());
@@ -416,7 +417,7 @@ public:
  return const_cast<Value*>(this)->at(index);
  }
  Value& at(const Value & index) {
- if (!index.is_hashable()) throw std::runtime_error("Unashable type: " + dump());
+ if (!index.is_hashable()) throw std::runtime_error("Unhashable type: " + dump());
  if (is_array()) return array_->at(index.get<int>());
  if (is_object()) return object_->at(index.primitive_);
  throw std::runtime_error("Value is not an array or object: " + dump());
@@ -676,8 +677,8 @@ public:
  class VariableExpr : public Expression {
  std::string name;
  public:
- VariableExpr(const Location & location, const std::string& n)
- : Expression(location), name(n) {}
+ VariableExpr(const Location & loc, const std::string& n)
+ : Expression(loc), name(n) {}
  std::string get_name() const { return name; }
  Value do_evaluate(const std::shared_ptr<Context> & context) const override {
  if (!context->contains(name)) {
@@ -1200,9 +1201,9 @@ public:

  class SliceExpr : public Expression {
  public:
- std::shared_ptr<Expression> start, end;
- SliceExpr(const Location & loc, std::shared_ptr<Expression> && s, std::shared_ptr<Expression> && e)
- : Expression(loc), start(std::move(s)), end(std::move(e)) {}
+ std::shared_ptr<Expression> start, end, step;
+ SliceExpr(const Location & loc, std::shared_ptr<Expression> && s, std::shared_ptr<Expression> && e, std::shared_ptr<Expression> && st = nullptr)
+ : Expression(loc), start(std::move(s)), end(std::move(e)), step(std::move(st)) {}
  Value do_evaluate(const std::shared_ptr<Context> &) const override {
  throw std::runtime_error("SliceExpr not implemented");
  }
@@ -1219,18 +1220,35 @@ public:
  if (!index) throw std::runtime_error("SubscriptExpr.index is null");
  auto target_value = base->evaluate(context);
  if (auto slice = dynamic_cast<SliceExpr*>(index.get())) {
- auto start = slice->start ? slice->start->evaluate(context).get<int64_t>() : 0;
- auto end = slice->end ? slice->end->evaluate(context).get<int64_t>() : (int64_t) target_value.size();
+ auto len = target_value.size();
+ auto wrap = [len](int64_t i) -> int64_t {
+ if (i < 0) {
+ return i + len;
+ }
+ return i;
+ };
+ int64_t step = slice->step ? slice->step->evaluate(context).get<int64_t>() : 1;
+ if (!step) {
+ throw std::runtime_error("slice step cannot be zero");
+ }
+ int64_t start = slice->start ? wrap(slice->start->evaluate(context).get<int64_t>()) : (step < 0 ? len - 1 : 0);
+ int64_t end = slice->end ? wrap(slice->end->evaluate(context).get<int64_t>()) : (step < 0 ? -1 : len);
  if (target_value.is_string()) {
  std::string s = target_value.get<std::string>();
- if (start < 0) start = s.size() + start;
- if (end < 0) end = s.size() + end;
- return s.substr(start, end - start);
+
+ std::string result;
+ if (start < end && step == 1) {
+ result = s.substr(start, end - start);
+ } else {
+ for (int64_t i = start; step > 0 ? i < end : i > end; i += step) {
+ result += s[i];
+ }
+ }
+ return result;
+
  } else if (target_value.is_array()) {
- if (start < 0) start = target_value.size() + start;
- if (end < 0) end = target_value.size() + end;
  auto result = Value::array();
- for (auto i = start; i < end; ++i) {
+ for (int64_t i = start; step > 0 ? i < end : i > end; i += step) {
  result.push_back(target_value.at(i));
  }
  return result;
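After this rewrite, minja evaluates Python-style start:end:step slices, including negative steps, on both strings and arrays. A hedged sketch of exercising it through minja's parser; the Parser/Context/render calls mirror minja's README usage and are assumptions, not part of this diff:

// Hedged sketch: render a template that uses the new step-aware slicing.
#include "minja/minja.hpp"
#include <iostream>

using json = nlohmann::ordered_json;

int main() {
    // negative step reverses the string, printing "olleh"; items[::2] would keep every other element
    auto tmpl = minja::Parser::parse("{{ 'hello'[::-1] }}", /* options= */ {});
    auto context = minja::Context::make(minja::Value(json::object()));
    std::cout << tmpl->render(context) << std::endl;
    return 0;
}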
@@ -1305,6 +1323,8 @@ public:
  if (name == "iterable") return l.is_iterable();
  if (name == "sequence") return l.is_array();
  if (name == "defined") return !l.is_null();
+ if (name == "true") return l.to_bool();
+ if (name == "false") return !l.to_bool();
  throw std::runtime_error("Unknown type for 'is' operator: " + name);
  };
  auto value = eval();
@@ -1520,6 +1540,10 @@ public:
  vargs.expectArgs("endswith method", {1, 1}, {0, 0});
  auto suffix = vargs.args[0].get<std::string>();
  return suffix.length() <= str.length() && std::equal(suffix.rbegin(), suffix.rend(), str.rbegin());
+ } else if (method->get_name() == "startswith") {
+ vargs.expectArgs("startswith method", {1, 1}, {0, 0});
+ auto prefix = vargs.args[0].get<std::string>();
+ return prefix.length() <= str.length() && std::equal(prefix.begin(), prefix.end(), str.begin());
  } else if (method->get_name() == "title") {
  vargs.expectArgs("title method", {0, 0}, {0, 0});
  auto res = str;
@@ -2082,28 +2106,37 @@ private:

  while (it != end && consumeSpaces() && peekSymbols({ "[", "." })) {
  if (!consumeToken("[").empty()) {
- std::shared_ptr<Expression> index;
+ std::shared_ptr<Expression> index;
+ auto slice_loc = get_location();
+ std::shared_ptr<Expression> start, end, step;
+ bool has_first_colon = false, has_second_colon = false;
+
+ if (!peekSymbols({ ":" })) {
+ start = parseExpression();
+ }
+
+ if (!consumeToken(":").empty()) {
+ has_first_colon = true;
+ if (!peekSymbols({ ":", "]" })) {
+ end = parseExpression();
+ }
  if (!consumeToken(":").empty()) {
- auto slice_end = parseExpression();
- index = std::make_shared<SliceExpr>(slice_end->location, nullptr, std::move(slice_end));
- } else {
- auto slice_start = parseExpression();
- if (!consumeToken(":").empty()) {
- consumeSpaces();
- if (peekSymbols({ "]" })) {
- index = std::make_shared<SliceExpr>(slice_start->location, std::move(slice_start), nullptr);
- } else {
- auto slice_end = parseExpression();
- index = std::make_shared<SliceExpr>(slice_start->location, std::move(slice_start), std::move(slice_end));
- }
- } else {
- index = std::move(slice_start);
+ has_second_colon = true;
+ if (!peekSymbols({ "]" })) {
+ step = parseExpression();
  }
  }
- if (!index) throw std::runtime_error("Empty index in subscript");
- if (consumeToken("]").empty()) throw std::runtime_error("Expected closing bracket in subscript");
+ }
+
+ if ((has_first_colon || has_second_colon) && (start || end || step)) {
+ index = std::make_shared<SliceExpr>(slice_loc, std::move(start), std::move(end), std::move(step));
+ } else {
+ index = std::move(start);
+ }
+ if (!index) throw std::runtime_error("Empty index in subscript");
+ if (consumeToken("]").empty()) throw std::runtime_error("Expected closing bracket in subscript");

- value = std::make_shared<SubscriptExpr>(value->location, std::move(value), std::move(index));
+ value = std::make_shared<SubscriptExpr>(value->location, std::move(value), std::move(index));
  } else if (!consumeToken(".").empty()) {
  auto identifier = parseIdentifier();
  if (!identifier) throw std::runtime_error("Expected identifier in subscript");