llama-cpp-capacitor 0.0.6 → 0.0.7

This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
Files changed (149)
  1. package/cpp/LICENSE +21 -0
  2. package/cpp/README.md +4 -0
  3. package/cpp/anyascii.c +22223 -0
  4. package/cpp/anyascii.h +42 -0
  5. package/cpp/chat-parser.cpp +393 -0
  6. package/cpp/chat-parser.h +120 -0
  7. package/cpp/chat.cpp +2315 -0
  8. package/cpp/chat.h +221 -0
  9. package/cpp/common.cpp +1619 -0
  10. package/cpp/common.h +744 -0
  11. package/cpp/ggml-alloc.c +1028 -0
  12. package/cpp/ggml-alloc.h +76 -0
  13. package/cpp/ggml-backend-impl.h +255 -0
  14. package/cpp/ggml-backend-reg.cpp +600 -0
  15. package/cpp/ggml-backend.cpp +2118 -0
  16. package/cpp/ggml-backend.h +354 -0
  17. package/cpp/ggml-common.h +1878 -0
  18. package/cpp/ggml-cpp.h +39 -0
  19. package/cpp/ggml-cpu/amx/amx.cpp +221 -0
  20. package/cpp/ggml-cpu/amx/amx.h +8 -0
  21. package/cpp/ggml-cpu/amx/common.h +91 -0
  22. package/cpp/ggml-cpu/amx/mmq.cpp +2512 -0
  23. package/cpp/ggml-cpu/amx/mmq.h +10 -0
  24. package/cpp/ggml-cpu/arch/arm/cpu-feats.cpp +94 -0
  25. package/cpp/ggml-cpu/arch/arm/quants.c +3650 -0
  26. package/cpp/ggml-cpu/arch/arm/repack.cpp +1891 -0
  27. package/cpp/ggml-cpu/arch/x86/cpu-feats.cpp +327 -0
  28. package/cpp/ggml-cpu/arch/x86/quants.c +3820 -0
  29. package/cpp/ggml-cpu/arch/x86/repack.cpp +6307 -0
  30. package/cpp/ggml-cpu/arch-fallback.h +215 -0
  31. package/cpp/ggml-cpu/binary-ops.cpp +158 -0
  32. package/cpp/ggml-cpu/binary-ops.h +16 -0
  33. package/cpp/ggml-cpu/common.h +73 -0
  34. package/cpp/ggml-cpu/ggml-cpu-impl.h +525 -0
  35. package/cpp/ggml-cpu/ggml-cpu.c +3578 -0
  36. package/cpp/ggml-cpu/ggml-cpu.cpp +672 -0
  37. package/cpp/ggml-cpu/ops.cpp +10587 -0
  38. package/cpp/ggml-cpu/ops.h +114 -0
  39. package/cpp/ggml-cpu/quants.c +1193 -0
  40. package/cpp/ggml-cpu/quants.h +97 -0
  41. package/cpp/ggml-cpu/repack.cpp +1982 -0
  42. package/cpp/ggml-cpu/repack.h +120 -0
  43. package/cpp/ggml-cpu/simd-mappings.h +1184 -0
  44. package/cpp/ggml-cpu/traits.cpp +36 -0
  45. package/cpp/ggml-cpu/traits.h +38 -0
  46. package/cpp/ggml-cpu/unary-ops.cpp +186 -0
  47. package/cpp/ggml-cpu/unary-ops.h +28 -0
  48. package/cpp/ggml-cpu/vec.cpp +348 -0
  49. package/cpp/ggml-cpu/vec.h +1121 -0
  50. package/cpp/ggml-cpu.h +145 -0
  51. package/cpp/ggml-impl.h +622 -0
  52. package/cpp/ggml-metal-impl.h +688 -0
  53. package/cpp/ggml-metal.h +66 -0
  54. package/cpp/ggml-metal.m +6833 -0
  55. package/cpp/ggml-opt.cpp +1093 -0
  56. package/cpp/ggml-opt.h +256 -0
  57. package/cpp/ggml-quants.c +5324 -0
  58. package/cpp/ggml-quants.h +106 -0
  59. package/cpp/ggml-threading.cpp +12 -0
  60. package/cpp/ggml-threading.h +14 -0
  61. package/cpp/ggml.c +7108 -0
  62. package/cpp/ggml.h +2492 -0
  63. package/cpp/gguf.cpp +1358 -0
  64. package/cpp/gguf.h +202 -0
  65. package/cpp/json-partial.cpp +256 -0
  66. package/cpp/json-partial.h +38 -0
  67. package/cpp/json-schema-to-grammar.cpp +985 -0
  68. package/cpp/json-schema-to-grammar.h +21 -0
  69. package/cpp/llama-adapter.cpp +388 -0
  70. package/cpp/llama-adapter.h +76 -0
  71. package/cpp/llama-arch.cpp +2355 -0
  72. package/cpp/llama-arch.h +499 -0
  73. package/cpp/llama-batch.cpp +875 -0
  74. package/cpp/llama-batch.h +160 -0
  75. package/cpp/llama-chat.cpp +783 -0
  76. package/cpp/llama-chat.h +65 -0
  77. package/cpp/llama-context.cpp +2748 -0
  78. package/cpp/llama-context.h +306 -0
  79. package/cpp/llama-cparams.cpp +5 -0
  80. package/cpp/llama-cparams.h +41 -0
  81. package/cpp/llama-cpp.h +30 -0
  82. package/cpp/llama-grammar.cpp +1229 -0
  83. package/cpp/llama-grammar.h +173 -0
  84. package/cpp/llama-graph.cpp +1891 -0
  85. package/cpp/llama-graph.h +810 -0
  86. package/cpp/llama-hparams.cpp +180 -0
  87. package/cpp/llama-hparams.h +233 -0
  88. package/cpp/llama-impl.cpp +167 -0
  89. package/cpp/llama-impl.h +61 -0
  90. package/cpp/llama-io.cpp +15 -0
  91. package/cpp/llama-io.h +35 -0
  92. package/cpp/llama-kv-cache-iswa.cpp +318 -0
  93. package/cpp/llama-kv-cache-iswa.h +135 -0
  94. package/cpp/llama-kv-cache.cpp +2059 -0
  95. package/cpp/llama-kv-cache.h +374 -0
  96. package/cpp/llama-kv-cells.h +491 -0
  97. package/cpp/llama-memory-hybrid.cpp +258 -0
  98. package/cpp/llama-memory-hybrid.h +137 -0
  99. package/cpp/llama-memory-recurrent.cpp +1146 -0
  100. package/cpp/llama-memory-recurrent.h +179 -0
  101. package/cpp/llama-memory.cpp +59 -0
  102. package/cpp/llama-memory.h +119 -0
  103. package/cpp/llama-mmap.cpp +600 -0
  104. package/cpp/llama-mmap.h +68 -0
  105. package/cpp/llama-model-loader.cpp +1164 -0
  106. package/cpp/llama-model-loader.h +170 -0
  107. package/cpp/llama-model-saver.cpp +282 -0
  108. package/cpp/llama-model-saver.h +37 -0
  109. package/cpp/llama-model.cpp +19042 -0
  110. package/cpp/llama-model.h +491 -0
  111. package/cpp/llama-sampling.cpp +2575 -0
  112. package/cpp/llama-sampling.h +32 -0
  113. package/cpp/llama-vocab.cpp +3792 -0
  114. package/cpp/llama-vocab.h +176 -0
  115. package/cpp/llama.cpp +358 -0
  116. package/cpp/llama.h +1373 -0
  117. package/cpp/log.cpp +427 -0
  118. package/cpp/log.h +103 -0
  119. package/cpp/minja/chat-template.hpp +550 -0
  120. package/cpp/minja/minja.hpp +3009 -0
  121. package/cpp/nlohmann/json.hpp +25526 -0
  122. package/cpp/nlohmann/json_fwd.hpp +187 -0
  123. package/cpp/regex-partial.cpp +204 -0
  124. package/cpp/regex-partial.h +56 -0
  125. package/cpp/rn-completion.cpp +681 -0
  126. package/cpp/rn-completion.h +116 -0
  127. package/cpp/rn-llama.cpp +345 -0
  128. package/cpp/rn-llama.h +149 -0
  129. package/cpp/rn-mtmd.hpp +602 -0
  130. package/cpp/rn-tts.cpp +591 -0
  131. package/cpp/rn-tts.h +59 -0
  132. package/cpp/sampling.cpp +579 -0
  133. package/cpp/sampling.h +107 -0
  134. package/cpp/tools/mtmd/clip-impl.h +473 -0
  135. package/cpp/tools/mtmd/clip.cpp +4322 -0
  136. package/cpp/tools/mtmd/clip.h +106 -0
  137. package/cpp/tools/mtmd/miniaudio/miniaudio.h +93468 -0
  138. package/cpp/tools/mtmd/mtmd-audio.cpp +769 -0
  139. package/cpp/tools/mtmd/mtmd-audio.h +47 -0
  140. package/cpp/tools/mtmd/mtmd-helper.cpp +460 -0
  141. package/cpp/tools/mtmd/mtmd-helper.h +91 -0
  142. package/cpp/tools/mtmd/mtmd.cpp +1066 -0
  143. package/cpp/tools/mtmd/mtmd.h +298 -0
  144. package/cpp/tools/mtmd/stb/stb_image.h +7988 -0
  145. package/cpp/unicode-data.cpp +7034 -0
  146. package/cpp/unicode-data.h +20 -0
  147. package/cpp/unicode.cpp +1061 -0
  148. package/cpp/unicode.h +68 -0
  149. package/package.json +2 -1
package/cpp/llama-memory-recurrent.h
@@ -0,0 +1,179 @@
+ #pragma once
+
+ #include "llama-batch.h"
+ #include "llama-graph.h"
+ #include "llama-memory.h"
+
+ #include <set>
+ #include <vector>
+
+ //
+ // llama_memory_recurrent
+ //
+
+ // TODO: extract the cache state used for graph computation into llama_memory_recurrent_context_i
+ // see the implementation of llama_kv_cache_context_i for an example how to do it
+ class llama_memory_recurrent : public llama_memory_i {
+ public:
+     llama_memory_recurrent(
+             const llama_model & model,
+             lm_ggml_type type_r,
+             lm_ggml_type type_s,
+             bool offload,
+             uint32_t mem_size,
+             uint32_t n_seq_max,
+             const layer_filter_cb & filter);
+
+     ~llama_memory_recurrent() = default;
+
+     //
+     // llama_memory_i
+     //
+
+     llama_memory_context_ptr init_batch(
+             llama_batch_allocr & balloc,
+             uint32_t n_ubatch,
+             bool embd_all) override;
+
+     llama_memory_context_ptr init_full() override;
+
+     llama_memory_context_ptr init_update(llama_context * lctx, bool optimize) override;
+
+     void clear(bool data) override;
+
+     bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
+     void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
+     void seq_keep(llama_seq_id seq_id) override;
+     void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) override;
+     void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
+
+     llama_pos seq_pos_min(llama_seq_id seq_id) const override;
+     llama_pos seq_pos_max(llama_seq_id seq_id) const override;
+
+     bool prepare(const std::vector<llama_ubatch> & ubatches);
+
+     // find a contiguous slot of memory cells and emplace the ubatch there
+     bool find_slot(const llama_ubatch & ubatch);
+
+     bool get_can_shift() const override;
+
+     // state write/load
+
+     void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1, llama_state_seq_flags flags = 0) const override;
+     void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1, llama_state_seq_flags flags = 0) override;
+
+     uint32_t head = 0; // the location where the batch will be placed in the cache (see find_slot())
+     uint32_t size = 0; // total number of cells, shared across all sequences
+     uint32_t used = 0; // used cells (i.e. at least one seq_id)
+
+     // computed before each graph build
+     uint32_t n = 0;
+
+     // first zero-ed state
+     int32_t rs_z = -1;
+
+     // TODO: optimize for recurrent state needs
+     struct mem_cell {
+         llama_pos pos = -1;
+         int32_t src = -1; // used to know where states should be copied from
+         int32_t src0 = -1; // like src, but only used when setting the inputs (allowing to copy once)
+         int32_t tail = -1;
+
+         std::set<llama_seq_id> seq_id;
+
+         bool has_seq_id(const llama_seq_id & id) const {
+             return seq_id.find(id) != seq_id.end();
+         }
+
+         bool is_empty() const {
+             return seq_id.empty();
+         }
+
+         bool is_same_seq(const mem_cell & other) const {
+             return seq_id == other.seq_id;
+         }
+     };
+
+     std::vector<mem_cell> cells;
+
+     // per layer
+     std::vector<lm_ggml_tensor *> r_l;
+     std::vector<lm_ggml_tensor *> s_l;
+
+ private:
+     //const llama_model & model;
+     const llama_hparams & hparams;
+
+     const uint32_t n_seq_max = 1;
+
+     std::vector<lm_ggml_context_ptr> ctxs;
+     std::vector<lm_ggml_backend_buffer_ptr> bufs;
+
+     size_t total_size() const;
+
+     size_t size_r_bytes() const;
+     size_t size_s_bytes() const;
+
+     void state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) const;
+     void state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const;
+
+     bool state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id = -1);
+     bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
+ };
+
+ class llama_memory_recurrent_context : public llama_memory_context_i {
+ public:
+     // used for errors
+     llama_memory_recurrent_context(llama_memory_status status);
+
+     // used to create a full-cache or update context
+     llama_memory_recurrent_context(
+             llama_memory_recurrent * mem);
+
+     // used to create a batch processing context from a batch
+     llama_memory_recurrent_context(
+             llama_memory_recurrent * mem,
+             std::vector<llama_ubatch> ubatches);
+
+     virtual ~llama_memory_recurrent_context();
+
+     //
+     // llama_memory_context_i
+     //
+
+     bool next() override;
+     bool apply() override;
+
+     llama_memory_status get_status() const override;
+     const llama_ubatch & get_ubatch() const override;
+
+     //
+     // llama_memory_recurrent_context specific API
+     //
+
+     uint32_t get_n_rs() const;
+     uint32_t get_head() const;
+     int32_t get_rs_z() const;
+     uint32_t get_size() const;
+
+     lm_ggml_tensor * get_r_l(int32_t il) const;
+     lm_ggml_tensor * get_s_l(int32_t il) const;
+
+     int32_t s_copy(int i) const;
+
+ private:
+     const llama_memory_status status;
+
+     llama_memory_recurrent * mem;
+
+     size_t i_next = 0;
+
+     std::vector<llama_ubatch> ubatches;
+
+     //
+     // data needed for building the compute graph for the current ubatch:
+     // TODO: extract all the state like `head` and `n` here
+     //
+
+     const bool is_full = false;
+ };
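
Note: the mem_cell struct in the header above carries the per-sequence bookkeeping of the recurrent cache: each cell holds a std::set<llama_seq_id> of the sequences currently referencing its state, and the has_seq_id/is_empty/is_same_seq helpers are what the cache queries when cells are claimed, shared, or freed. The following is a minimal standalone sketch of that ownership logic; demo_cell, seq_id_t and main() are illustrative stand-ins, not part of the package.

#include <cassert>
#include <set>
#include <vector>

using seq_id_t = int; // stand-in for llama_seq_id

struct demo_cell {
    int pos = -1;                 // position of the state stored in this cell
    std::set<seq_id_t> seq_id;    // sequences currently referencing this cell

    bool has_seq_id(seq_id_t id) const { return seq_id.count(id) != 0; }
    bool is_empty() const { return seq_id.empty(); }
    bool is_same_seq(const demo_cell & other) const { return seq_id == other.seq_id; }
};

int main() {
    std::vector<demo_cell> cells(4);

    // claim two cells for two independent sequences
    cells[0].pos = 0; cells[0].seq_id.insert(0);
    cells[1].pos = 0; cells[1].seq_id.insert(1);
    assert(!cells[0].is_same_seq(cells[1]));

    // a second sequence starts referencing cell 0's state
    cells[0].seq_id.insert(2);
    assert(cells[0].has_seq_id(2));

    // once the last sequence is removed, the cell counts as empty and can be reused
    cells[1].seq_id.erase(1);
    assert(cells[1].is_empty());

    return 0;
}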
package/cpp/llama-memory.cpp
@@ -0,0 +1,59 @@
+ #include "llama-memory.h"
+
+ llama_memory_status llama_memory_status_combine(llama_memory_status s0, llama_memory_status s1) {
+     bool has_update = false;
+
+     switch (s0) {
+         case LLAMA_MEMORY_STATUS_SUCCESS:
+             {
+                 has_update = true;
+                 break;
+             }
+         case LLAMA_MEMORY_STATUS_NO_UPDATE:
+             {
+                 break;
+             }
+         case LLAMA_MEMORY_STATUS_FAILED_PREPARE:
+         case LLAMA_MEMORY_STATUS_FAILED_COMPUTE:
+             {
+                 return s0;
+             }
+     }
+
+     switch (s1) {
+         case LLAMA_MEMORY_STATUS_SUCCESS:
+             {
+                 has_update = true;
+                 break;
+             }
+         case LLAMA_MEMORY_STATUS_NO_UPDATE:
+             {
+                 break;
+             }
+         case LLAMA_MEMORY_STATUS_FAILED_PREPARE:
+         case LLAMA_MEMORY_STATUS_FAILED_COMPUTE:
+             {
+                 return s1;
+             }
+     }
+
+     // if either status has an update, then the combined status has an update
+     return has_update ? LLAMA_MEMORY_STATUS_SUCCESS : LLAMA_MEMORY_STATUS_NO_UPDATE;
+ }
+
+ bool llama_memory_status_is_fail(llama_memory_status status) {
+     switch (status) {
+         case LLAMA_MEMORY_STATUS_SUCCESS:
+         case LLAMA_MEMORY_STATUS_NO_UPDATE:
+             {
+                 return false;
+             }
+         case LLAMA_MEMORY_STATUS_FAILED_PREPARE:
+         case LLAMA_MEMORY_STATUS_FAILED_COMPUTE:
+             {
+                 return true;
+             }
+     }
+
+     return false;
+ }
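
Note: llama_memory_status_combine is the helper that composite memories (such as the iSWA and hybrid caches shipped in this package) use to merge the outcome of two sub-contexts: any failure wins, otherwise the result is SUCCESS if at least one side performed an update and NO_UPDATE if neither did. Below is a self-contained sketch of those rules; the local status enum and combine() only mirror the behaviour of the code above and are not the package's API.

#include <cassert>

enum status { SUCCESS, NO_UPDATE, FAILED_PREPARE, FAILED_COMPUTE };

// condensed restatement of the combine rules implemented above
static status combine(status s0, status s1) {
    if (s0 == FAILED_PREPARE || s0 == FAILED_COMPUTE) return s0;  // failure in s0 dominates
    if (s1 == FAILED_PREPARE || s1 == FAILED_COMPUTE) return s1;  // then failure in s1
    return (s0 == SUCCESS || s1 == SUCCESS) ? SUCCESS : NO_UPDATE;
}

int main() {
    assert(combine(SUCCESS,   NO_UPDATE)      == SUCCESS);        // one side updated
    assert(combine(NO_UPDATE, NO_UPDATE)      == NO_UPDATE);      // nothing to do
    assert(combine(SUCCESS,   FAILED_COMPUTE) == FAILED_COMPUTE); // failure dominates
    assert(combine(FAILED_PREPARE, SUCCESS)   == FAILED_PREPARE);
    return 0;
}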
package/cpp/llama-memory.h
@@ -0,0 +1,119 @@
+ #pragma once
+
+ #include "llama.h"
+
+ #include <memory>
+ #include <functional>
+
+ struct llama_ubatch;
+
+ class llama_batch_allocr;
+
+ class llama_io_write_i;
+ class llama_io_read_i;
+
+ struct llama_memory_params {
+     // kv cache
+     lm_ggml_type type_k;
+     lm_ggml_type type_v;
+
+     // use full-size SWA cache
+     bool swa_full;
+ };
+
+ enum llama_memory_status {
+     LLAMA_MEMORY_STATUS_SUCCESS = 0,
+     LLAMA_MEMORY_STATUS_NO_UPDATE,
+     LLAMA_MEMORY_STATUS_FAILED_PREPARE,
+     LLAMA_MEMORY_STATUS_FAILED_COMPUTE,
+ };
+
+ // helper function for combining the status of two memory contexts
+ // useful for implementing hybrid memory types (e.g. iSWA)
+ llama_memory_status llama_memory_status_combine(llama_memory_status s0, llama_memory_status s1);
+
+ // helper function for checking if a memory status indicates a failure
+ bool llama_memory_status_is_fail(llama_memory_status status);
+
+ // the interface for managing the memory context during batch processing
+ // this interface is implemented per memory type. see:
+ //   - llama_kv_cache_context
+ //   - llama_kv_cache_iswa_context
+ //   ...
+ //
+ // the only method that should mutate the memory and the memory context is llama_memory_i::apply()
+ struct llama_memory_context_i {
+     virtual ~llama_memory_context_i() = default;
+
+     // consume the current ubatch from the context and proceed to the next one
+     // return false if we are done
+     virtual bool next() = 0;
+
+     // apply the memory state for the current ubatch to the memory object
+     // return false on failure
+     virtual bool apply() = 0;
+
+     // get the current ubatch
+     virtual const llama_ubatch & get_ubatch() const = 0;
+
+     // get the status of the memory context - used for error handling and checking if any updates would be applied
+     virtual llama_memory_status get_status() const = 0;
+ };
+
+ using llama_memory_context_ptr = std::unique_ptr<llama_memory_context_i>;
+
+ // general concept of LLM memory
+ // the KV cache is a type of LLM memory, but there can be other types
+ struct llama_memory_i {
+     // this callback is used to filter out layers that should not be included in the cache
+     using layer_filter_cb = std::function<bool(int32_t il)>;
+
+     // this callback is used to specify which layers should reuse memory from other layers
+     // return negative value to indicate that the layer il should not reuse memory
+     using layer_reuse_cb = std::function<int32_t(int32_t il)>;
+
+     virtual ~llama_memory_i() = default;
+
+     // split the input batch into a set of ubatches and verify that they can fit into the cache
+     // return a context object containing the ubatches and memory state required to process them
+     // check the llama_memory_context_i::get_status() for the result
+     virtual llama_memory_context_ptr init_batch(
+             llama_batch_allocr & balloc,
+             uint32_t n_ubatch,
+             bool embd_all) = 0;
+
+     // simulate full cache, used for allocating worst-case compute buffers
+     virtual llama_memory_context_ptr init_full() = 0;
+
+     // prepare for any pending memory updates, such as shifts, copies, etc.
+     // status == LLAMA_MEMORY_STATUS_NO_UPDATE if there is nothing to update
+     virtual llama_memory_context_ptr init_update(llama_context * lctx, bool optimize) = 0;
+
+     // getters
+     virtual bool get_can_shift() const = 0;
+
+     //
+     // ops
+     //
+
+     // if data == true, the data buffers will also be cleared together with the metadata
+     virtual void clear(bool data) = 0;
+
+     virtual bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) = 0;
+     virtual void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) = 0;
+     virtual void seq_keep(llama_seq_id seq_id) = 0;
+     virtual void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) = 0;
+     virtual void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) = 0;
+
+     virtual llama_pos seq_pos_min(llama_seq_id seq_id) const = 0;
+     virtual llama_pos seq_pos_max(llama_seq_id seq_id) const = 0;
+
+     //
+     // state write/read
+     //
+
+     virtual void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1, llama_state_seq_flags flags = 0) const = 0;
+     virtual void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1, llama_state_seq_flags flags = 0) = 0;
+ };
+
+ using llama_memory_ptr = std::unique_ptr<llama_memory_i>;
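
Note: taken together, llama_memory_i and llama_memory_context_i define the batch-processing protocol described in the comments above: the caller obtains a context from the memory object via init_batch(), checks get_status(), and then alternates apply() with next() over the prepared ubatches until next() reports completion. The following is a simplified, self-contained sketch of that driving loop; demo_memory_context is a hypothetical stand-in with the same shape as the interface, not the real llama.cpp types.

#include <cstdio>
#include <vector>

enum mem_status { MEM_SUCCESS, MEM_FAILED_PREPARE };

// hypothetical stand-in for a llama_memory_context_i implementation
struct demo_memory_context {
    std::vector<int> ubatches;   // stand-in for std::vector<llama_ubatch>
    size_t i_next = 0;
    mem_status status = MEM_SUCCESS;

    mem_status get_status() const { return status; }
    int get_ubatch() const { return ubatches[i_next]; }
    bool apply() { return true; }                       // would mutate the memory here
    bool next() { return ++i_next < ubatches.size(); }  // false once all ubatches are consumed
};

int main() {
    demo_memory_context mctx;           // in llama.cpp this would come from llama_memory_i::init_batch()
    mctx.ubatches = {0, 1, 2};

    if (mctx.get_status() != MEM_SUCCESS) {
        return 1;                       // preparing the batch failed
    }

    do {
        if (!mctx.apply()) {            // apply the memory state for the current ubatch
            return 1;
        }
        std::printf("processing ubatch %d\n", mctx.get_ubatch());
    } while (mctx.next());              // advance; stop when the batch is exhausted

    return 0;
}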