cui-llama.rn 1.3.5 → 1.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +22 -1
- package/android/src/main/CMakeLists.txt +25 -20
- package/android/src/main/java/com/rnllama/LlamaContext.java +31 -9
- package/android/src/main/java/com/rnllama/RNLlama.java +98 -0
- package/android/src/main/jni-utils.h +94 -0
- package/android/src/main/jni.cpp +108 -37
- package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +15 -0
- package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +15 -0
- package/cpp/common.cpp +1982 -1965
- package/cpp/common.h +665 -657
- package/cpp/ggml-backend-reg.cpp +5 -0
- package/cpp/ggml-backend.cpp +5 -2
- package/cpp/ggml-cpp.h +1 -0
- package/cpp/ggml-cpu-aarch64.cpp +6 -1
- package/cpp/ggml-cpu-quants.c +5 -1
- package/cpp/ggml-cpu.c +14122 -14122
- package/cpp/ggml-cpu.cpp +627 -627
- package/cpp/ggml-impl.h +11 -16
- package/cpp/ggml-metal-impl.h +288 -0
- package/cpp/ggml-metal.m +2 -2
- package/cpp/ggml-opt.cpp +854 -0
- package/cpp/ggml-opt.h +216 -0
- package/cpp/ggml.c +0 -1276
- package/cpp/ggml.h +0 -140
- package/cpp/gguf.cpp +1325 -0
- package/cpp/gguf.h +202 -0
- package/cpp/llama-adapter.cpp +346 -0
- package/cpp/llama-adapter.h +73 -0
- package/cpp/llama-arch.cpp +1434 -0
- package/cpp/llama-arch.h +395 -0
- package/cpp/llama-batch.cpp +368 -0
- package/cpp/llama-batch.h +88 -0
- package/cpp/llama-chat.cpp +567 -0
- package/cpp/llama-chat.h +51 -0
- package/cpp/llama-context.cpp +1771 -0
- package/cpp/llama-context.h +128 -0
- package/cpp/llama-cparams.cpp +1 -0
- package/cpp/llama-cparams.h +37 -0
- package/cpp/llama-cpp.h +30 -0
- package/cpp/llama-grammar.cpp +1 -0
- package/cpp/llama-grammar.h +3 -1
- package/cpp/llama-hparams.cpp +71 -0
- package/cpp/llama-hparams.h +140 -0
- package/cpp/llama-impl.cpp +167 -0
- package/cpp/llama-impl.h +16 -136
- package/cpp/llama-kv-cache.cpp +718 -0
- package/cpp/llama-kv-cache.h +218 -0
- package/cpp/llama-mmap.cpp +589 -0
- package/cpp/llama-mmap.h +67 -0
- package/cpp/llama-model-loader.cpp +1011 -0
- package/cpp/llama-model-loader.h +158 -0
- package/cpp/llama-model.cpp +2202 -0
- package/cpp/llama-model.h +391 -0
- package/cpp/llama-sampling.cpp +117 -4
- package/cpp/llama-vocab.cpp +21 -28
- package/cpp/llama-vocab.h +13 -1
- package/cpp/llama.cpp +12547 -23528
- package/cpp/llama.h +31 -6
- package/cpp/rn-llama.hpp +90 -87
- package/cpp/sgemm.cpp +776 -70
- package/cpp/sgemm.h +14 -14
- package/cpp/unicode.cpp +6 -0
- package/ios/RNLlama.mm +47 -0
- package/ios/RNLlamaContext.h +3 -1
- package/ios/RNLlamaContext.mm +71 -14
- package/jest/mock.js +15 -3
- package/lib/commonjs/NativeRNLlama.js.map +1 -1
- package/lib/commonjs/index.js +33 -37
- package/lib/commonjs/index.js.map +1 -1
- package/lib/module/NativeRNLlama.js.map +1 -1
- package/lib/module/index.js +31 -35
- package/lib/module/index.js.map +1 -1
- package/lib/typescript/NativeRNLlama.d.ts +26 -6
- package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
- package/lib/typescript/index.d.ts +21 -36
- package/lib/typescript/index.d.ts.map +1 -1
- package/llama-rn.podspec +4 -18
- package/package.json +2 -3
- package/src/NativeRNLlama.ts +32 -13
- package/src/index.ts +52 -47
package/cpp/llama-impl.h
CHANGED
@@ -1,10 +1,9 @@
 #pragma once
 
-#include "
+#include "ggml.h" // for lm_ggml_log_level
 
 #include <string>
 #include <vector>
-#include <stdexcept>
 
 #ifdef __GNUC__
 #ifdef __MINGW32__
@@ -35,147 +34,28 @@ void llama_log_callback_default(lm_ggml_log_level level, const char * text, void
 // helpers
 //
 
-
-
+template <typename T>
+struct no_init {
+    T value;
+    no_init() { /* do nothing */ }
+};
 
-
-
-
-}
-}
+struct time_meas {
+    time_meas(int64_t & t_acc, bool disable = false);
+    ~time_meas();
 
     const int64_t t_start_us;
 
     int64_t & t_acc;
 };
 
-
-    if (search.empty()) {
-        return;
-    }
-    std::string builder;
-    builder.reserve(s.length());
-    size_t pos = 0;
-    size_t last_pos = 0;
-    while ((pos = s.find(search, last_pos)) != std::string::npos) {
-        builder.append(s, last_pos, pos - last_pos);
-        builder.append(replace);
-        last_pos = pos + search.length();
-    }
-    builder.append(s, last_pos, std::string::npos);
-    s = std::move(builder);
-}
-
-const std::vector<std::pair<std::string, struct lm_ggml_tensor *>> & llama_internal_get_tensor_map(
-    struct llama_context * ctx
-);
-
-// the ring buffer works similarly to std::deque, but with a fixed capacity
-template<typename T>
-struct ring_buffer {
-    ring_buffer(size_t cap) : capacity(cap), data(cap) {}
-
-    T & front() {
-        if (sz == 0) {
-            throw std::runtime_error("ring buffer is empty");
-        }
-        return data[first];
-    }
-
-    const T & front() const {
-        if (sz == 0) {
-            throw std::runtime_error("ring buffer is empty");
-        }
-        return data[first];
-    }
-
-    T & back() {
-        if (sz == 0) {
-            throw std::runtime_error("ring buffer is empty");
-        }
-        return data[pos];
-    }
-
-    const T & back() const {
-        if (sz == 0) {
-            throw std::runtime_error("ring buffer is empty");
-        }
-        return data[pos];
-    }
+void replace_all(std::string & s, const std::string & search, const std::string & replace);
 
-
-
-
-}
+// TODO: rename to llama_format ?
+LLAMA_ATTRIBUTE_FORMAT(1, 2)
+std::string format(const char * fmt, ...);
 
-
-
-            first = (first + 1) % capacity;
-        } else {
-            sz++;
-        }
-        data[pos] = value;
-        pos = (pos + 1) % capacity;
-    }
+std::string llama_format_tensor_shape(const std::vector<int64_t> & ne);
+std::string llama_format_tensor_shape(const struct lm_ggml_tensor * t);
 
-
-        if (sz == 0) {
-            throw std::runtime_error("ring buffer is empty");
-        }
-        T value = data[first];
-        first = (first + 1) % capacity;
-        sz--;
-        return value;
-    }
-
-    //T & operator[](size_t i) {
-    //    if (i >= sz) {
-    //        throw std::runtime_error("ring buffer: index out of bounds");
-    //    }
-    //    return data[(first + i) % capacity];
-    //}
-
-    //const T & at(size_t i) const {
-    //    if (i >= sz) {
-    //        throw std::runtime_error("ring buffer: index out of bounds");
-    //    }
-    //    return data[(first + i) % capacity];
-    //}
-
-    const T & rat(size_t i) const {
-        if (i >= sz) {
-            throw std::runtime_error("ring buffer: index out of bounds");
-        }
-        return data[(first + sz - i - 1) % capacity];
-    }
-
-    std::vector<T> to_vector() const {
-        std::vector<T> result;
-        result.reserve(sz);
-        for (size_t i = 0; i < sz; i++) {
-            result.push_back(data[(first + i) % capacity]);
-        }
-        return result;
-    }
-
-    void clear() {
-        // here only reset the status of the buffer
-        sz = 0;
-        first = 0;
-        pos = 0;
-    }
-
-    bool empty() const {
-        return sz == 0;
-    }
-
-    size_t size() const {
-        return sz;
-    }
-
-    size_t capacity = 0;
-    size_t sz = 0;
-    size_t first = 0;
-    size_t pos = 0;
-    std::vector<T> data;
-};
+std::string lm_gguf_kv_to_str(const struct lm_gguf_context * ctx_gguf, int i);