cui-llama.rn 1.4.0 → 1.4.2
This diff shows the content of publicly available package versions as published to their respective public registries, and is provided for informational purposes only.
- package/README.md +4 -23
- package/android/build.gradle +12 -3
- package/android/src/main/CMakeLists.txt +13 -7
- package/android/src/main/java/com/rnllama/LlamaContext.java +27 -20
- package/android/src/main/java/com/rnllama/RNLlama.java +5 -1
- package/android/src/main/jni.cpp +15 -12
- package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
- package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
- package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
- package/cpp/README.md +1 -1
- package/cpp/common.cpp +158 -267
- package/cpp/common.h +46 -12
- package/cpp/ggml-alloc.c +1042 -1037
- package/cpp/ggml-backend-impl.h +255 -256
- package/cpp/ggml-backend-reg.cpp +582 -582
- package/cpp/ggml-backend.cpp +2002 -2002
- package/cpp/ggml-backend.h +354 -352
- package/cpp/ggml-common.h +1853 -1853
- package/cpp/ggml-cpp.h +39 -39
- package/cpp/ggml-cpu-aarch64.cpp +4247 -4247
- package/cpp/ggml-cpu-aarch64.h +8 -8
- package/cpp/ggml-cpu-impl.h +386 -386
- package/cpp/ggml-cpu-quants.c +10920 -10839
- package/cpp/ggml-cpu-traits.cpp +36 -36
- package/cpp/ggml-cpu-traits.h +38 -38
- package/cpp/ggml-cpu.c +329 -60
- package/cpp/ggml-cpu.cpp +10 -2
- package/cpp/ggml-cpu.h +135 -135
- package/cpp/ggml-impl.h +567 -567
- package/cpp/ggml-metal-impl.h +17 -17
- package/cpp/ggml-metal.m +4884 -4884
- package/cpp/ggml-quants.c +5238 -5238
- package/cpp/ggml-threading.h +14 -14
- package/cpp/ggml.c +6514 -6448
- package/cpp/ggml.h +2194 -2163
- package/cpp/gguf.cpp +1329 -1325
- package/cpp/gguf.h +202 -202
- package/cpp/json-schema-to-grammar.cpp +1045 -1045
- package/cpp/json-schema-to-grammar.h +8 -8
- package/cpp/json.hpp +24766 -24766
- package/cpp/llama-adapter.cpp +347 -346
- package/cpp/llama-adapter.h +74 -73
- package/cpp/llama-arch.cpp +1487 -1434
- package/cpp/llama-arch.h +400 -395
- package/cpp/llama-batch.cpp +368 -368
- package/cpp/llama-batch.h +88 -88
- package/cpp/llama-chat.cpp +578 -567
- package/cpp/llama-chat.h +52 -51
- package/cpp/llama-context.cpp +1775 -1771
- package/cpp/llama-context.h +128 -128
- package/cpp/llama-cparams.cpp +1 -1
- package/cpp/llama-cparams.h +37 -37
- package/cpp/llama-cpp.h +30 -30
- package/cpp/llama-grammar.cpp +1139 -1139
- package/cpp/llama-grammar.h +143 -143
- package/cpp/llama-hparams.cpp +71 -71
- package/cpp/llama-hparams.h +139 -140
- package/cpp/llama-impl.cpp +167 -167
- package/cpp/llama-impl.h +61 -61
- package/cpp/llama-kv-cache.cpp +718 -718
- package/cpp/llama-kv-cache.h +218 -218
- package/cpp/llama-mmap.cpp +2 -1
- package/cpp/llama-mmap.h +67 -67
- package/cpp/llama-model-loader.cpp +1124 -1011
- package/cpp/llama-model-loader.h +167 -158
- package/cpp/llama-model.cpp +3997 -2202
- package/cpp/llama-model.h +370 -391
- package/cpp/llama-sampling.cpp +2408 -2406
- package/cpp/llama-sampling.h +32 -48
- package/cpp/llama-vocab.cpp +3247 -1982
- package/cpp/llama-vocab.h +125 -182
- package/cpp/llama.cpp +416 -2886
- package/cpp/llama.h +1323 -1285
- package/cpp/log.cpp +401 -401
- package/cpp/log.h +121 -121
- package/cpp/rn-llama.cpp +822 -0
- package/cpp/rn-llama.h +123 -0
- package/cpp/rn-llama.hpp +18 -12
- package/cpp/sampling.cpp +505 -500
- package/cpp/sgemm.cpp +2597 -2597
- package/cpp/speculative.cpp +277 -274
- package/cpp/speculative.h +28 -28
- package/cpp/unicode.cpp +2 -3
- package/ios/CMakeLists.txt +99 -0
- package/ios/RNLlama.h +5 -1
- package/ios/RNLlama.mm +2 -2
- package/ios/RNLlamaContext.h +8 -1
- package/ios/RNLlamaContext.mm +15 -11
- package/ios/rnllama.xcframework/Info.plist +74 -0
- package/jest/mock.js +3 -2
- package/lib/commonjs/NativeRNLlama.js.map +1 -1
- package/lib/commonjs/index.js +4 -2
- package/lib/commonjs/index.js.map +1 -1
- package/lib/module/NativeRNLlama.js.map +1 -1
- package/lib/module/index.js +4 -2
- package/lib/module/index.js.map +1 -1
- package/lib/typescript/NativeRNLlama.d.ts +5 -1
- package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
- package/lib/typescript/index.d.ts.map +1 -1
- package/llama-rn.podspec +8 -2
- package/package.json +5 -2
- package/src/NativeRNLlama.ts +5 -1
- package/src/index.ts +9 -2
package/cpp/llama-adapter.h
CHANGED
@@ -1,73 +1,74 @@
-[73 lines removed; only fragments survive in the source: #pragma once, #include "llama…, #include <unordered_map>, #include <vector>, int32_t…, std::vector<…]
+#pragma once
+
+#include "llama.h"
+
+#include "ggml-cpp.h"
+
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+// TODO: pimpl
+
+//
+// llama_adapter_cvec
+//
+
+struct llama_adapter_cvec {
+    struct lm_ggml_tensor * tensor_for(int il) const;
+
+    struct lm_ggml_tensor * apply_to(struct lm_ggml_context * ctx, struct lm_ggml_tensor * cur, int il) const;
+
+    int32_t apply(
+            const llama_model & model,
+            const float * data,
+            size_t len,
+            int32_t n_embd,
+            int32_t il_start,
+            int32_t il_end);
+
+private:
+    bool init(const llama_model & model);
+
+    int32_t layer_start = -1;
+    int32_t layer_end   = -1;
+
+    std::vector<lm_ggml_context_ptr> ctxs;
+    std::vector<lm_ggml_backend_buffer_ptr> bufs;
+
+    std::vector<struct lm_ggml_tensor *> tensors; // per layer
+};
+
+//
+// llama_adapter_lora
+//
+
+struct llama_adapter_lora_weight {
+    struct lm_ggml_tensor * a = nullptr;
+    struct lm_ggml_tensor * b = nullptr;
+
+    // get actual scale based on rank and alpha
+    float get_scale(float alpha, float adapter_scale) const {
+        const float rank = (float) b->ne[0];
+        const float scale = alpha ? adapter_scale * alpha / rank : adapter_scale;
+        return scale;
+    }
+
+    llama_adapter_lora_weight() = default;
+    llama_adapter_lora_weight(struct lm_ggml_tensor * a, struct lm_ggml_tensor * b) : a(a), b(b) {}
+};
+
+struct llama_adapter_lora {
+    // map tensor name to lora_a_b
+    std::unordered_map<std::string, struct llama_adapter_lora_weight> ab_map;
+
+    std::vector<lm_ggml_context_ptr> ctxs;
+    std::vector<lm_ggml_backend_buffer_ptr> bufs;
+
+    float alpha;
+
+    llama_adapter_lora() = default;
+    ~llama_adapter_lora() = default;
+
+    llama_adapter_lora_weight * get_weight(struct lm_ggml_tensor * w);
+};
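One behavioral detail worth calling out in the new header: llama_adapter_lora_weight::get_scale multiplies the user-supplied adapter strength by alpha / rank (rank taken from b->ne[0]) whenever alpha is non-zero, and uses the user scale unchanged otherwise. The following is a minimal standalone sketch of that arithmetic only, with rank passed in directly since the tensor plumbing is omitted here; lora_scale is a hypothetical helper, not part of the package API.

    #include <cstdio>

    // Sketch of the rule in llama_adapter_lora_weight::get_scale: when the
    // adapter was trained with a non-zero alpha, the user scale is multiplied
    // by alpha / rank; otherwise the user scale is applied as-is.
    static float lora_scale(float alpha, float adapter_scale, float rank) {
        return alpha != 0.0f ? adapter_scale * alpha / rank : adapter_scale;
    }

    int main() {
        // e.g. a rank-16 adapter trained with alpha = 32 at user scale 1.0:
        // effective scale = 1.0 * 32 / 16 = 2.0
        std::printf("%.2f\n", lora_scale(32.0f, 1.0f, 16.0f)); // 2.00
        std::printf("%.2f\n", lora_scale( 0.0f, 1.0f, 16.0f)); // 1.00 (alpha unset)
        return 0;
    }

This is the usual LoRA convention: normalizing by rank keeps the effective update magnitude comparable across adapters trained at different ranks.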