node-llama-cpp 3.0.0-beta.38 → 3.0.0-beta.39
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bins/linux-arm64/_nlcBuildMetadata.json +1 -1
- package/bins/linux-arm64/libggml.so +0 -0
- package/bins/linux-arm64/libllama.so +0 -0
- package/bins/linux-arm64/llama-addon.node +0 -0
- package/bins/linux-armv7l/_nlcBuildMetadata.json +1 -1
- package/bins/linux-armv7l/libggml.so +0 -0
- package/bins/linux-armv7l/libllama.so +0 -0
- package/bins/linux-armv7l/llama-addon.node +0 -0
- package/bins/linux-x64/_nlcBuildMetadata.json +1 -1
- package/bins/linux-x64/libggml.so +0 -0
- package/bins/linux-x64/libllama.so +0 -0
- package/bins/linux-x64/llama-addon.node +0 -0
- package/bins/linux-x64-vulkan/_nlcBuildMetadata.json +1 -1
- package/bins/linux-x64-vulkan/libggml.so +0 -0
- package/bins/linux-x64-vulkan/libllama.so +0 -0
- package/bins/linux-x64-vulkan/llama-addon.node +0 -0
- package/bins/linux-x64-vulkan/vulkan-shaders-gen +0 -0
- package/bins/mac-arm64-metal/_nlcBuildMetadata.json +1 -1
- package/bins/mac-arm64-metal/ggml-common.h +24 -0
- package/bins/mac-arm64-metal/ggml-metal.metal +181 -552
- package/bins/mac-arm64-metal/libggml.dylib +0 -0
- package/bins/mac-arm64-metal/libllama.dylib +0 -0
- package/bins/mac-arm64-metal/llama-addon.node +0 -0
- package/bins/mac-x64/_nlcBuildMetadata.json +1 -1
- package/bins/mac-x64/libggml.dylib +0 -0
- package/bins/mac-x64/libllama.dylib +0 -0
- package/bins/mac-x64/llama-addon.node +0 -0
- package/bins/win-arm64/_nlcBuildMetadata.json +1 -1
- package/bins/win-arm64/ggml.dll +0 -0
- package/bins/win-arm64/llama-addon.exp +0 -0
- package/bins/win-arm64/llama-addon.lib +0 -0
- package/bins/win-arm64/llama-addon.node +0 -0
- package/bins/win-arm64/llama.dll +0 -0
- package/bins/win-x64/_nlcBuildMetadata.json +1 -1
- package/bins/win-x64/ggml.dll +0 -0
- package/bins/win-x64/llama-addon.node +0 -0
- package/bins/win-x64/llama.dll +0 -0
- package/bins/win-x64-vulkan/_nlcBuildMetadata.json +1 -1
- package/bins/win-x64-vulkan/ggml.dll +0 -0
- package/bins/win-x64-vulkan/llama-addon.node +0 -0
- package/bins/win-x64-vulkan/llama.dll +0 -0
- package/bins/win-x64-vulkan/vulkan-shaders-gen.exe +0 -0
- package/dist/ChatWrapper.d.ts +2 -1
- package/dist/ChatWrapper.js +19 -5
- package/dist/ChatWrapper.js.map +1 -1
- package/dist/bindings/AddonTypes.d.ts +13 -2
- package/dist/bindings/getLlama.d.ts +3 -2
- package/dist/bindings/getLlama.js +1 -1
- package/dist/bindings/getLlama.js.map +1 -1
- package/dist/chatWrappers/FunctionaryChatWrapper.js +8 -5
- package/dist/chatWrappers/FunctionaryChatWrapper.js.map +1 -1
- package/dist/chatWrappers/GemmaChatWrapper.js +1 -1
- package/dist/chatWrappers/GemmaChatWrapper.js.map +1 -1
- package/dist/chatWrappers/Llama3ChatWrapper.js +3 -4
- package/dist/chatWrappers/Llama3ChatWrapper.js.map +1 -1
- package/dist/chatWrappers/Llama3_1ChatWrapper.d.ts +31 -0
- package/dist/chatWrappers/Llama3_1ChatWrapper.js +223 -0
- package/dist/chatWrappers/Llama3_1ChatWrapper.js.map +1 -0
- package/dist/chatWrappers/utils/ChatModelFunctionsDocumentationGenerator.d.ts +17 -2
- package/dist/chatWrappers/utils/ChatModelFunctionsDocumentationGenerator.js +39 -2
- package/dist/chatWrappers/utils/ChatModelFunctionsDocumentationGenerator.js.map +1 -1
- package/dist/chatWrappers/utils/jsonDumps.d.ts +7 -0
- package/dist/chatWrappers/utils/jsonDumps.js +18 -0
- package/dist/chatWrappers/utils/jsonDumps.js.map +1 -0
- package/dist/chatWrappers/utils/resolveChatWrapper.d.ts +5 -3
- package/dist/chatWrappers/utils/resolveChatWrapper.js +50 -4
- package/dist/chatWrappers/utils/resolveChatWrapper.js.map +1 -1
- package/dist/cli/commands/ChatCommand.d.ts +1 -1
- package/dist/cli/commands/ChatCommand.js +2 -4
- package/dist/cli/commands/ChatCommand.js.map +1 -1
- package/dist/cli/commands/CompleteCommand.js +2 -2
- package/dist/cli/commands/CompleteCommand.js.map +1 -1
- package/dist/cli/commands/InfillCommand.js +2 -2
- package/dist/cli/commands/InfillCommand.js.map +1 -1
- package/dist/cli/recommendedModels.js +43 -24
- package/dist/cli/recommendedModels.js.map +1 -1
- package/dist/consts.d.ts +1 -0
- package/dist/consts.js +1 -0
- package/dist/consts.js.map +1 -1
- package/dist/evaluator/LlamaChat/LlamaChat.d.ts +22 -0
- package/dist/evaluator/LlamaChat/LlamaChat.js +65 -34
- package/dist/evaluator/LlamaChat/LlamaChat.js.map +1 -1
- package/dist/evaluator/LlamaChatSession/LlamaChatSession.d.ts +28 -6
- package/dist/evaluator/LlamaChatSession/LlamaChatSession.js +22 -16
- package/dist/evaluator/LlamaChatSession/LlamaChatSession.js.map +1 -1
- package/dist/evaluator/LlamaChatSession/utils/LlamaChatSessionPromptCompletionEngine.js +4 -5
- package/dist/evaluator/LlamaChatSession/utils/LlamaChatSessionPromptCompletionEngine.js.map +1 -1
- package/dist/evaluator/LlamaCompletion.d.ts +13 -2
- package/dist/evaluator/LlamaCompletion.js +10 -5
- package/dist/evaluator/LlamaCompletion.js.map +1 -1
- package/dist/evaluator/LlamaContext/LlamaContext.d.ts +1 -1
- package/dist/evaluator/LlamaContext/LlamaContext.js +60 -0
- package/dist/evaluator/LlamaContext/LlamaContext.js.map +1 -1
- package/dist/evaluator/LlamaContext/types.d.ts +21 -0
- package/dist/evaluator/LlamaGrammar.d.ts +6 -3
- package/dist/evaluator/LlamaGrammar.js +2 -2
- package/dist/evaluator/LlamaGrammar.js.map +1 -1
- package/dist/evaluator/LlamaModel/LlamaModel.d.ts +16 -32
- package/dist/evaluator/LlamaModel/LlamaModel.js +94 -53
- package/dist/evaluator/LlamaModel/LlamaModel.js.map +1 -1
- package/dist/gguf/consts.d.ts +1 -0
- package/dist/gguf/consts.js +4 -0
- package/dist/gguf/consts.js.map +1 -1
- package/dist/gguf/insights/GgufInsights.js +4 -0
- package/dist/gguf/insights/GgufInsights.js.map +1 -1
- package/dist/gguf/parser/GgufV2Parser.js +3 -1
- package/dist/gguf/parser/GgufV2Parser.js.map +1 -1
- package/dist/gguf/types/GgufMetadataTypes.d.ts +16 -0
- package/dist/gguf/types/GgufMetadataTypes.js.map +1 -1
- package/dist/gguf/utils/convertMetadataKeyValueRecordToNestedObject.d.ts +3 -2
- package/dist/gguf/utils/convertMetadataKeyValueRecordToNestedObject.js +44 -8
- package/dist/gguf/utils/convertMetadataKeyValueRecordToNestedObject.js.map +1 -1
- package/dist/index.d.ts +4 -2
- package/dist/index.js +3 -1
- package/dist/index.js.map +1 -1
- package/dist/types.d.ts +15 -1
- package/dist/types.js.map +1 -1
- package/dist/utils/DeepPartialObject.d.ts +3 -0
- package/dist/utils/DeepPartialObject.js +2 -0
- package/dist/utils/DeepPartialObject.js.map +1 -0
- package/dist/utils/StopGenerationDetector.d.ts +6 -3
- package/dist/utils/StopGenerationDetector.js +22 -7
- package/dist/utils/StopGenerationDetector.js.map +1 -1
- package/dist/utils/TokenStreamRegulator.d.ts +1 -0
- package/dist/utils/TokenStreamRegulator.js +23 -5
- package/dist/utils/TokenStreamRegulator.js.map +1 -1
- package/dist/utils/resolveLastTokens.d.ts +2 -0
- package/dist/utils/resolveLastTokens.js +12 -0
- package/dist/utils/resolveLastTokens.js.map +1 -0
- package/llama/CMakeLists.txt +1 -1
- package/llama/addon/AddonContext.cpp +772 -0
- package/llama/addon/AddonContext.h +53 -0
- package/llama/addon/AddonGrammar.cpp +44 -0
- package/llama/addon/AddonGrammar.h +18 -0
- package/llama/addon/AddonGrammarEvaluationState.cpp +28 -0
- package/llama/addon/AddonGrammarEvaluationState.h +15 -0
- package/llama/addon/AddonModel.cpp +681 -0
- package/llama/addon/AddonModel.h +61 -0
- package/llama/addon/AddonModelData.cpp +25 -0
- package/llama/addon/AddonModelData.h +15 -0
- package/llama/addon/AddonModelLora.cpp +107 -0
- package/llama/addon/AddonModelLora.h +28 -0
- package/llama/addon/addon.cpp +217 -0
- package/llama/addon/addonGlobals.cpp +22 -0
- package/llama/addon/addonGlobals.h +12 -0
- package/llama/addon/globals/addonLog.cpp +135 -0
- package/llama/addon/globals/addonLog.h +21 -0
- package/llama/addon/globals/addonProgress.cpp +15 -0
- package/llama/addon/globals/addonProgress.h +15 -0
- package/llama/addon/globals/getGpuInfo.cpp +108 -0
- package/llama/addon/globals/getGpuInfo.h +6 -0
- package/llama/binariesGithubRelease.json +1 -1
- package/llama/gitRelease.bundle +0 -0
- package/llama/grammars/README.md +1 -1
- package/llama/llama.cpp.info.json +1 -1
- package/package.json +3 -3
- package/templates/packed/electron-typescript-react.json +1 -1
- package/templates/packed/node-typescript.json +1 -1
- package/llama/addon.cpp +0 -1997
|
@@ -0,0 +1,772 @@
|
|
|
1
|
+
#include <thread>
|
|
2
|
+
#include <algorithm>
|
|
3
|
+
#include "common.h"
|
|
4
|
+
#include "llama.h"
|
|
5
|
+
|
|
6
|
+
#include "addonGlobals.h"
|
|
7
|
+
#include "AddonModel.h"
|
|
8
|
+
#include "AddonModelLora.h"
|
|
9
|
+
#include "AddonGrammarEvaluationState.h"
|
|
10
|
+
#include "AddonContext.h"
|
|
11
|
+
|
|
12
|
+
// Estimates the number of bytes that llama_batch_init allocates for a batch
// with the given capacity, mirroring its internal allocation layout, so the
// amount can be reported to the JS engine as external memory.
static uint64_t calculateBatchMemorySize(int32_t n_tokens_alloc, int32_t embd, int32_t n_seq_max) {
    uint64_t totalSize = 0;

    // Token payload: embedding vectors when embd != 0, raw token ids otherwise.
    if (embd) {
        totalSize += sizeof(float) * n_tokens_alloc * embd;
    } else {
        totalSize += sizeof(llama_token) * n_tokens_alloc;
    }

    totalSize += sizeof(llama_pos) * n_tokens_alloc;            // per-token positions
    totalSize += sizeof(int32_t) * n_tokens_alloc;              // per-token sequence counts
    totalSize += sizeof(llama_seq_id *) * (n_tokens_alloc + 1); // sequence-id pointer table (+1 sentinel)
    totalSize += sizeof(llama_seq_id) * n_seq_max * n_tokens_alloc; // sequence ids per token
    totalSize += sizeof(int8_t) * n_tokens_alloc;               // per-token logits flags

    return totalSize;
}
|
|
31
|
+
|
|
32
|
+
class AddonContextDecodeBatchWorker : public Napi::AsyncWorker {
|
|
33
|
+
public:
|
|
34
|
+
AddonContext* ctx;
|
|
35
|
+
|
|
36
|
+
AddonContextDecodeBatchWorker(const Napi::Env& env, AddonContext* ctx)
|
|
37
|
+
: Napi::AsyncWorker(env, "AddonContextDecodeBatchWorker"),
|
|
38
|
+
ctx(ctx),
|
|
39
|
+
deferred(Napi::Promise::Deferred::New(env)) {
|
|
40
|
+
ctx->Ref();
|
|
41
|
+
}
|
|
42
|
+
~AddonContextDecodeBatchWorker() {
|
|
43
|
+
ctx->Unref();
|
|
44
|
+
}
|
|
45
|
+
|
|
46
|
+
Napi::Promise GetPromise() {
|
|
47
|
+
return deferred.Promise();
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
protected:
|
|
51
|
+
Napi::Promise::Deferred deferred;
|
|
52
|
+
|
|
53
|
+
void Execute() {
|
|
54
|
+
try {
|
|
55
|
+
// Perform the evaluation using llama_decode.
|
|
56
|
+
int r = llama_decode(ctx->ctx, ctx->batch);
|
|
57
|
+
|
|
58
|
+
if (r != 0) {
|
|
59
|
+
if (r == 1) {
|
|
60
|
+
SetError("could not find a KV slot for the batch (try reducing the size of the batch or increase the context)");
|
|
61
|
+
} else {
|
|
62
|
+
SetError("Eval has failed");
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
return;
|
|
66
|
+
}
|
|
67
|
+
|
|
68
|
+
llama_synchronize(ctx->ctx);
|
|
69
|
+
} catch (const std::exception& e) {
|
|
70
|
+
SetError(e.what());
|
|
71
|
+
} catch(...) {
|
|
72
|
+
SetError("Unknown error when calling \"llama_decode\"");
|
|
73
|
+
}
|
|
74
|
+
}
|
|
75
|
+
void OnOK() {
|
|
76
|
+
deferred.Resolve(Env().Undefined());
|
|
77
|
+
}
|
|
78
|
+
void OnError(const Napi::Error& err) {
|
|
79
|
+
deferred.Reject(err.Value());
|
|
80
|
+
}
|
|
81
|
+
};
|
|
82
|
+
|
|
83
|
+
class AddonContextLoadContextWorker : public Napi::AsyncWorker {
|
|
84
|
+
public:
|
|
85
|
+
AddonContext* context;
|
|
86
|
+
|
|
87
|
+
AddonContextLoadContextWorker(const Napi::Env& env, AddonContext* context)
|
|
88
|
+
: Napi::AsyncWorker(env, "AddonContextLoadContextWorker"),
|
|
89
|
+
context(context),
|
|
90
|
+
deferred(Napi::Promise::Deferred::New(env)) {
|
|
91
|
+
context->Ref();
|
|
92
|
+
}
|
|
93
|
+
~AddonContextLoadContextWorker() {
|
|
94
|
+
context->Unref();
|
|
95
|
+
}
|
|
96
|
+
|
|
97
|
+
Napi::Promise GetPromise() {
|
|
98
|
+
return deferred.Promise();
|
|
99
|
+
}
|
|
100
|
+
|
|
101
|
+
protected:
|
|
102
|
+
Napi::Promise::Deferred deferred;
|
|
103
|
+
|
|
104
|
+
void Execute() {
|
|
105
|
+
try {
|
|
106
|
+
context->ctx = llama_new_context_with_model(context->model->model, context->context_params);
|
|
107
|
+
|
|
108
|
+
context->contextLoaded = context->ctx != nullptr && context->ctx != NULL;
|
|
109
|
+
} catch (const std::exception& e) {
|
|
110
|
+
SetError(e.what());
|
|
111
|
+
} catch(...) {
|
|
112
|
+
SetError("Unknown error when calling \"llama_new_context_with_model\"");
|
|
113
|
+
}
|
|
114
|
+
}
|
|
115
|
+
void OnOK() {
|
|
116
|
+
if (context->contextLoaded) {
|
|
117
|
+
uint64_t contextMemorySize = llama_state_get_size(context->ctx);
|
|
118
|
+
adjustNapiExternalMemoryAdd(Env(), contextMemorySize);
|
|
119
|
+
context->loadedContextMemorySize = contextMemorySize;
|
|
120
|
+
}
|
|
121
|
+
|
|
122
|
+
deferred.Resolve(Napi::Boolean::New(Env(), context->contextLoaded));
|
|
123
|
+
}
|
|
124
|
+
void OnError(const Napi::Error& err) {
|
|
125
|
+
deferred.Reject(err.Value());
|
|
126
|
+
}
|
|
127
|
+
};
|
|
128
|
+
class AddonContextUnloadContextWorker : public Napi::AsyncWorker {
|
|
129
|
+
public:
|
|
130
|
+
AddonContext* context;
|
|
131
|
+
|
|
132
|
+
AddonContextUnloadContextWorker(const Napi::Env& env, AddonContext* context)
|
|
133
|
+
: Napi::AsyncWorker(env, "AddonContextUnloadContextWorker"),
|
|
134
|
+
context(context),
|
|
135
|
+
deferred(Napi::Promise::Deferred::New(env)) {
|
|
136
|
+
context->Ref();
|
|
137
|
+
}
|
|
138
|
+
~AddonContextUnloadContextWorker() {
|
|
139
|
+
context->Unref();
|
|
140
|
+
}
|
|
141
|
+
|
|
142
|
+
Napi::Promise GetPromise() {
|
|
143
|
+
return deferred.Promise();
|
|
144
|
+
}
|
|
145
|
+
|
|
146
|
+
protected:
|
|
147
|
+
Napi::Promise::Deferred deferred;
|
|
148
|
+
|
|
149
|
+
void Execute() {
|
|
150
|
+
try {
|
|
151
|
+
llama_free(context->ctx);
|
|
152
|
+
context->contextLoaded = false;
|
|
153
|
+
|
|
154
|
+
try {
|
|
155
|
+
if (context->has_batch) {
|
|
156
|
+
llama_batch_free(context->batch);
|
|
157
|
+
context->has_batch = false;
|
|
158
|
+
context->batch_n_tokens = 0;
|
|
159
|
+
}
|
|
160
|
+
|
|
161
|
+
context->dispose();
|
|
162
|
+
} catch (const std::exception& e) {
|
|
163
|
+
SetError(e.what());
|
|
164
|
+
} catch(...) {
|
|
165
|
+
SetError("Unknown error when calling \"llama_batch_free\"");
|
|
166
|
+
}
|
|
167
|
+
} catch (const std::exception& e) {
|
|
168
|
+
SetError(e.what());
|
|
169
|
+
} catch(...) {
|
|
170
|
+
SetError("Unknown error when calling \"llama_free\"");
|
|
171
|
+
}
|
|
172
|
+
}
|
|
173
|
+
void OnOK() {
|
|
174
|
+
adjustNapiExternalMemorySubtract(Env(), context->loadedContextMemorySize);
|
|
175
|
+
context->loadedContextMemorySize = 0;
|
|
176
|
+
|
|
177
|
+
adjustNapiExternalMemorySubtract(Env(), context->batchMemorySize);
|
|
178
|
+
context->batchMemorySize = 0;
|
|
179
|
+
|
|
180
|
+
deferred.Resolve(Env().Undefined());
|
|
181
|
+
}
|
|
182
|
+
void OnError(const Napi::Error& err) {
|
|
183
|
+
deferred.Reject(err.Value());
|
|
184
|
+
}
|
|
185
|
+
};
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
class AddonContextSampleTokenWorker : public Napi::AsyncWorker {
|
|
189
|
+
public:
|
|
190
|
+
AddonContext* ctx;
|
|
191
|
+
AddonGrammarEvaluationState* grammar_evaluation_state;
|
|
192
|
+
int32_t batchLogitIndex;
|
|
193
|
+
bool use_grammar = false;
|
|
194
|
+
llama_token result;
|
|
195
|
+
float temperature = 0.0f;
|
|
196
|
+
float min_p = 0;
|
|
197
|
+
int32_t top_k = 40;
|
|
198
|
+
float top_p = 0.95f;
|
|
199
|
+
float repeat_penalty = 1.10f; // 1.0 = disabled
|
|
200
|
+
float repeat_penalty_presence_penalty = 0.00f; // 0.0 = disabled
|
|
201
|
+
float repeat_penalty_frequency_penalty = 0.00f; // 0.0 = disabled
|
|
202
|
+
std::vector<llama_token> repeat_penalty_tokens;
|
|
203
|
+
std::unordered_map<llama_token, float> tokenBiases;
|
|
204
|
+
bool useTokenBiases = false;
|
|
205
|
+
bool use_repeat_penalty = false;
|
|
206
|
+
|
|
207
|
+
AddonContextSampleTokenWorker(const Napi::CallbackInfo& info, AddonContext* ctx)
|
|
208
|
+
: Napi::AsyncWorker(info.Env(), "AddonContextSampleTokenWorker"),
|
|
209
|
+
ctx(ctx),
|
|
210
|
+
deferred(Napi::Promise::Deferred::New(info.Env())) {
|
|
211
|
+
ctx->Ref();
|
|
212
|
+
|
|
213
|
+
batchLogitIndex = info[0].As<Napi::Number>().Int32Value();
|
|
214
|
+
|
|
215
|
+
if (info.Length() > 1 && info[1].IsObject()) {
|
|
216
|
+
Napi::Object options = info[1].As<Napi::Object>();
|
|
217
|
+
|
|
218
|
+
if (options.Has("temperature")) {
|
|
219
|
+
temperature = options.Get("temperature").As<Napi::Number>().FloatValue();
|
|
220
|
+
}
|
|
221
|
+
|
|
222
|
+
if (options.Has("minP")) {
|
|
223
|
+
min_p = options.Get("minP").As<Napi::Number>().FloatValue();
|
|
224
|
+
}
|
|
225
|
+
|
|
226
|
+
if (options.Has("topK")) {
|
|
227
|
+
top_k = options.Get("topK").As<Napi::Number>().Int32Value();
|
|
228
|
+
}
|
|
229
|
+
|
|
230
|
+
if (options.Has("topP")) {
|
|
231
|
+
top_p = options.Get("topP").As<Napi::Number>().FloatValue();
|
|
232
|
+
}
|
|
233
|
+
|
|
234
|
+
if (options.Has("repeatPenalty")) {
|
|
235
|
+
repeat_penalty = options.Get("repeatPenalty").As<Napi::Number>().FloatValue();
|
|
236
|
+
}
|
|
237
|
+
|
|
238
|
+
if (options.Has("repeatPenaltyTokens")) {
|
|
239
|
+
Napi::Uint32Array repeat_penalty_tokens_uint32_array = options.Get("repeatPenaltyTokens").As<Napi::Uint32Array>();
|
|
240
|
+
|
|
241
|
+
repeat_penalty_tokens.reserve(repeat_penalty_tokens_uint32_array.ElementLength());
|
|
242
|
+
for (size_t i = 0; i < repeat_penalty_tokens_uint32_array.ElementLength(); i++) {
|
|
243
|
+
repeat_penalty_tokens.push_back(static_cast<llama_token>(repeat_penalty_tokens_uint32_array[i]));
|
|
244
|
+
}
|
|
245
|
+
|
|
246
|
+
use_repeat_penalty = true;
|
|
247
|
+
}
|
|
248
|
+
|
|
249
|
+
if (options.Has("tokenBiasKeys") && options.Has("tokenBiasValues")) {
|
|
250
|
+
Napi::Uint32Array tokenBiasKeys = options.Get("tokenBiasKeys").As<Napi::Uint32Array>();
|
|
251
|
+
Napi::Float32Array tokenBiasValues = options.Get("tokenBiasValues").As<Napi::Float32Array>();
|
|
252
|
+
|
|
253
|
+
if (tokenBiasKeys.ElementLength() == tokenBiasValues.ElementLength()) {
|
|
254
|
+
for (size_t i = 0; i < tokenBiasKeys.ElementLength(); i++) {
|
|
255
|
+
tokenBiases[static_cast<llama_token>(tokenBiasKeys[i])] = tokenBiasValues[i];
|
|
256
|
+
}
|
|
257
|
+
|
|
258
|
+
useTokenBiases = true;
|
|
259
|
+
}
|
|
260
|
+
}
|
|
261
|
+
|
|
262
|
+
if (options.Has("repeatPenaltyPresencePenalty")) {
|
|
263
|
+
repeat_penalty_presence_penalty = options.Get("repeatPenaltyPresencePenalty").As<Napi::Number>().FloatValue();
|
|
264
|
+
}
|
|
265
|
+
|
|
266
|
+
if (options.Has("repeatPenaltyFrequencyPenalty")) {
|
|
267
|
+
repeat_penalty_frequency_penalty = options.Get("repeatPenaltyFrequencyPenalty").As<Napi::Number>().FloatValue();
|
|
268
|
+
}
|
|
269
|
+
|
|
270
|
+
if (options.Has("grammarEvaluationState")) {
|
|
271
|
+
grammar_evaluation_state =
|
|
272
|
+
Napi::ObjectWrap<AddonGrammarEvaluationState>::Unwrap(options.Get("grammarEvaluationState").As<Napi::Object>());
|
|
273
|
+
grammar_evaluation_state->Ref();
|
|
274
|
+
use_grammar = true;
|
|
275
|
+
}
|
|
276
|
+
}
|
|
277
|
+
}
|
|
278
|
+
~AddonContextSampleTokenWorker() {
|
|
279
|
+
ctx->Unref();
|
|
280
|
+
|
|
281
|
+
if (use_grammar) {
|
|
282
|
+
grammar_evaluation_state->Unref();
|
|
283
|
+
use_grammar = false;
|
|
284
|
+
}
|
|
285
|
+
}
|
|
286
|
+
|
|
287
|
+
Napi::Promise GetPromise() {
|
|
288
|
+
return deferred.Promise();
|
|
289
|
+
}
|
|
290
|
+
|
|
291
|
+
protected:
|
|
292
|
+
Napi::Promise::Deferred deferred;
|
|
293
|
+
|
|
294
|
+
void Execute() {
|
|
295
|
+
try {
|
|
296
|
+
SampleToken();
|
|
297
|
+
} catch (const std::exception& e) {
|
|
298
|
+
SetError(e.what());
|
|
299
|
+
} catch(...) {
|
|
300
|
+
SetError("Unknown error when calling \"SampleToken\"");
|
|
301
|
+
}
|
|
302
|
+
}
|
|
303
|
+
|
|
304
|
+
void SampleToken() {
|
|
305
|
+
llama_token new_token_id = 0;
|
|
306
|
+
|
|
307
|
+
// Select the best prediction.
|
|
308
|
+
if (llama_get_logits(ctx->ctx) == nullptr) {
|
|
309
|
+
SetError("This model does not support token generation");
|
|
310
|
+
return;
|
|
311
|
+
}
|
|
312
|
+
|
|
313
|
+
auto logits = llama_get_logits_ith(ctx->ctx, batchLogitIndex);
|
|
314
|
+
auto n_vocab = llama_n_vocab(ctx->model->model);
|
|
315
|
+
|
|
316
|
+
std::vector<llama_token_data> candidates;
|
|
317
|
+
candidates.reserve(n_vocab);
|
|
318
|
+
|
|
319
|
+
for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
|
|
320
|
+
auto logit = logits[token_id];
|
|
321
|
+
|
|
322
|
+
if (useTokenBiases) {
|
|
323
|
+
bool hasTokenBias = tokenBiases.find(token_id) != tokenBiases.end();
|
|
324
|
+
if (hasTokenBias) {
|
|
325
|
+
auto logitBias = tokenBiases.at(token_id);
|
|
326
|
+
if (logitBias == -INFINITY || logitBias < -INFINITY) {
|
|
327
|
+
if (!llama_token_is_eog(ctx->model->model, token_id)) {
|
|
328
|
+
logit = -INFINITY;
|
|
329
|
+
}
|
|
330
|
+
} else {
|
|
331
|
+
logit += logitBias;
|
|
332
|
+
}
|
|
333
|
+
}
|
|
334
|
+
}
|
|
335
|
+
|
|
336
|
+
candidates.emplace_back(llama_token_data { token_id, logit, 0.0f });
|
|
337
|
+
}
|
|
338
|
+
|
|
339
|
+
llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };
|
|
340
|
+
|
|
341
|
+
if (use_repeat_penalty && !repeat_penalty_tokens.empty()) {
|
|
342
|
+
llama_sample_repetition_penalties(
|
|
343
|
+
ctx->ctx,
|
|
344
|
+
&candidates_p,
|
|
345
|
+
repeat_penalty_tokens.data(),
|
|
346
|
+
repeat_penalty_tokens.size(),
|
|
347
|
+
repeat_penalty,
|
|
348
|
+
repeat_penalty_frequency_penalty,
|
|
349
|
+
repeat_penalty_presence_penalty
|
|
350
|
+
);
|
|
351
|
+
}
|
|
352
|
+
|
|
353
|
+
if (use_grammar && (grammar_evaluation_state)->grammar != nullptr) {
|
|
354
|
+
llama_grammar_sample((grammar_evaluation_state)->grammar, ctx->ctx, &candidates_p);
|
|
355
|
+
|
|
356
|
+
if ((candidates_p.size == 0 || candidates_p.data[0].logit == -INFINITY) && useTokenBiases) {
|
|
357
|
+
// logit biases caused grammar sampling to fail, so sampling again without logit biases
|
|
358
|
+
useTokenBiases = false;
|
|
359
|
+
SampleToken();
|
|
360
|
+
return;
|
|
361
|
+
}
|
|
362
|
+
}
|
|
363
|
+
|
|
364
|
+
if (temperature <= 0) {
|
|
365
|
+
new_token_id = llama_sample_token_greedy(ctx->ctx, &candidates_p);
|
|
366
|
+
} else {
|
|
367
|
+
const int32_t resolved_top_k =
|
|
368
|
+
top_k <= 0 ? llama_n_vocab(ctx->model->model) : std::min(top_k, llama_n_vocab(ctx->model->model));
|
|
369
|
+
const int32_t n_probs = 0; // Number of probabilities to keep - 0 = disabled
|
|
370
|
+
const float tfs_z = 1.00f; // Tail free sampling - 1.0 = disabled
|
|
371
|
+
const float typical_p = 1.00f; // Typical probability - 1.0 = disabled
|
|
372
|
+
const float resolved_top_p = top_p; // Top p sampling - 1.0 = disabled
|
|
373
|
+
|
|
374
|
+
// Temperature sampling
|
|
375
|
+
size_t min_keep = std::max(1, n_probs);
|
|
376
|
+
llama_sample_top_k(ctx->ctx, &candidates_p, resolved_top_k, min_keep);
|
|
377
|
+
llama_sample_tail_free(ctx->ctx, &candidates_p, tfs_z, min_keep);
|
|
378
|
+
llama_sample_typical(ctx->ctx, &candidates_p, typical_p, min_keep);
|
|
379
|
+
llama_sample_top_p(ctx->ctx, &candidates_p, resolved_top_p, min_keep);
|
|
380
|
+
llama_sample_min_p(ctx->ctx, &candidates_p, min_p, min_keep);
|
|
381
|
+
llama_sample_temp(ctx->ctx, &candidates_p, temperature);
|
|
382
|
+
new_token_id = llama_sample_token(ctx->ctx, &candidates_p);
|
|
383
|
+
}
|
|
384
|
+
|
|
385
|
+
if (!llama_token_is_eog(ctx->model->model, new_token_id) && use_grammar && (grammar_evaluation_state)->grammar != nullptr) {
|
|
386
|
+
llama_grammar_accept_token((grammar_evaluation_state)->grammar, ctx->ctx, new_token_id);
|
|
387
|
+
}
|
|
388
|
+
|
|
389
|
+
result = new_token_id;
|
|
390
|
+
}
|
|
391
|
+
void OnOK() {
|
|
392
|
+
Napi::Number resultValue = Napi::Number::New(Env(), static_cast<uint32_t>(result));
|
|
393
|
+
deferred.Resolve(resultValue);
|
|
394
|
+
}
|
|
395
|
+
void OnError(const Napi::Error& err) {
|
|
396
|
+
deferred.Reject(err.Value());
|
|
397
|
+
}
|
|
398
|
+
};
|
|
399
|
+
|
|
400
|
+
// Wraps a llama context around an AddonModel, reading creation options from
// the JS side: seed/noSeed, contextSize, batchSize, sequences, embeddings,
// flashAttention, threads. The context itself is created later by Init().
//
// Fix: the original declared `uint64_t loadedContextMemorySize = 0;`,
// `bool contextLoaded = false;` and `bool disposed = false;` as LOCAL
// variables, shadowing the members of the same names — so the members were
// never initialized by the constructor (dead locals at best; indeterminate
// members at worst, if the header does not default-initialize them — TODO
// confirm against AddonContext.h). They are now member assignments.
AddonContext::AddonContext(const Napi::CallbackInfo& info) : Napi::ObjectWrap<AddonContext>(info) {
    batchMemorySize = 0;
    has_batch = false;
    batch_n_tokens = 0;
    n_cur = 0;

    loadedContextMemorySize = 0;
    contextLoaded = false;

    disposed = false;

    model = Napi::ObjectWrap<AddonModel>::Unwrap(info[0].As<Napi::Object>());
    model->Ref();

    context_params = llama_context_default_params();
    context_params.seed = -1;
    context_params.n_ctx = 4096;
    context_params.n_threads = 6;
    context_params.n_threads_batch = context_params.n_threads;

    if (info.Length() > 1 && info[1].IsObject()) {
        Napi::Object options = info[1].As<Napi::Object>();

        // "noSeed" takes precedence over an explicit "seed".
        if (options.Has("noSeed")) {
            context_params.seed = time(NULL);
        } else if (options.Has("seed")) {
            context_params.seed = options.Get("seed").As<Napi::Number>().Uint32Value();
        }

        if (options.Has("contextSize")) {
            context_params.n_ctx = options.Get("contextSize").As<Napi::Number>().Uint32Value();
        }

        if (options.Has("batchSize")) {
            context_params.n_batch = options.Get("batchSize").As<Napi::Number>().Uint32Value();
            context_params.n_ubatch = context_params.n_batch; // the batch queue is managed in the JS side, so there's no need for managing it on the C++ side
        }

        if (options.Has("sequences")) {
            context_params.n_seq_max = options.Get("sequences").As<Napi::Number>().Uint32Value();
        }

        if (options.Has("embeddings")) {
            context_params.embeddings = options.Get("embeddings").As<Napi::Boolean>().Value();
        }

        if (options.Has("flashAttention")) {
            context_params.flash_attn = options.Get("flashAttention").As<Napi::Boolean>().Value();
        }

        if (options.Has("threads")) {
            const auto n_threads = options.Get("threads").As<Napi::Number>().Uint32Value();
            // 0 means "use all hardware threads".
            const auto resolved_n_threads = n_threads == 0 ? std::thread::hardware_concurrency() : n_threads;

            context_params.n_threads = resolved_n_threads;
            context_params.n_threads_batch = resolved_n_threads;
        }
    }
}
|
|
459
|
+
// Releases all native resources unless they were already disposed explicitly;
// dispose() is idempotent, so a prior Dispose() call makes this a no-op.
AddonContext::~AddonContext() {
    dispose();
}
|
|
462
|
+
|
|
463
|
+
void AddonContext::dispose() {
|
|
464
|
+
if (disposed) {
|
|
465
|
+
return;
|
|
466
|
+
}
|
|
467
|
+
|
|
468
|
+
disposed = true;
|
|
469
|
+
if (contextLoaded) {
|
|
470
|
+
contextLoaded = false;
|
|
471
|
+
llama_free(ctx);
|
|
472
|
+
|
|
473
|
+
adjustNapiExternalMemorySubtract(Env(), loadedContextMemorySize);
|
|
474
|
+
loadedContextMemorySize = 0;
|
|
475
|
+
}
|
|
476
|
+
|
|
477
|
+
model->Unref();
|
|
478
|
+
|
|
479
|
+
disposeBatch();
|
|
480
|
+
}
|
|
481
|
+
// Frees the current batch (if any) and returns its external-memory accounting.
// Safe to call when no batch exists.
void AddonContext::disposeBatch() {
    if (!has_batch) {
        return;
    }

    llama_batch_free(batch);
    has_batch = false;
    batch_n_tokens = 0;

    adjustNapiExternalMemorySubtract(Env(), batchMemorySize);
    batchMemorySize = 0;
}
|
|
493
|
+
|
|
494
|
+
// Asynchronously creates the llama context with the parameters gathered in
// the constructor. Resolves to a boolean indicating whether loading succeeded.
Napi::Value AddonContext::Init(const Napi::CallbackInfo& info) {
    if (disposed) {
        Napi::Error::New(info.Env(), "Context is disposed").ThrowAsJavaScriptException();
        return info.Env().Undefined();
    }

    auto* worker = new AddonContextLoadContextWorker(this->Env(), this);
    worker->Queue();
    return worker->GetPromise();
}
|
|
504
|
+
// Asynchronously disposes the context. If a llama context is loaded, the
// teardown runs on a worker (freeing can be slow); otherwise disposal is
// synchronous and an already-resolved promise is returned.
Napi::Value AddonContext::Dispose(const Napi::CallbackInfo& info) {
    if (disposed) {
        return info.Env().Undefined();
    }

    if (!contextLoaded) {
        // Nothing heavy to free — dispose inline and resolve immediately.
        dispose();

        Napi::Promise::Deferred deferred = Napi::Promise::Deferred::New(info.Env());
        deferred.Resolve(info.Env().Undefined());
        return deferred.Promise();
    }

    // Flag as unloaded up-front so concurrent calls don't queue a second worker.
    contextLoaded = false;

    auto* worker = new AddonContextUnloadContextWorker(this->Env(), this);
    worker->Queue();
    return worker->GetPromise();
}
|
|
523
|
+
|
|
524
|
+
// Returns the loaded context's size in tokens (llama_n_ctx).
Napi::Value AddonContext::GetContextSize(const Napi::CallbackInfo& info) {
    if (disposed) {
        Napi::Error::New(info.Env(), "Context is disposed").ThrowAsJavaScriptException();
        return info.Env().Undefined();
    }

    return Napi::Number::From(info.Env(), llama_n_ctx(ctx));
}
|
|
532
|
+
// (Re)creates the token batch with capacity for `info[0]` tokens, freeing any
// previous batch, and updates the external-memory accounting by the delta.
Napi::Value AddonContext::InitBatch(const Napi::CallbackInfo& info) {
    if (disposed) {
        Napi::Error::New(info.Env(), "Context is disposed").ThrowAsJavaScriptException();
        return info.Env().Undefined();
    }

    if (has_batch) {
        llama_batch_free(batch);
    }

    int32_t n_tokens = info[0].As<Napi::Number>().Int32Value();

    // Token-mode batch (embd = 0) with a single sequence-id slot per token.
    batch = llama_batch_init(n_tokens, 0, 1);
    has_batch = true;
    batch_n_tokens = n_tokens;

    // NOTE(review): these arguments do not match the llama_batch_init call
    // above (embd = 0, n_seq_max = 1), so this likely overestimates the
    // batch's real footprint — confirm whether a conservative overestimate is
    // intentional here.
    uint64_t newBatchMemorySize = calculateBatchMemorySize(n_tokens, llama_n_embd(model->model), context_params.n_batch);
    if (newBatchMemorySize > batchMemorySize) {
        adjustNapiExternalMemoryAdd(Env(), newBatchMemorySize - batchMemorySize);
        batchMemorySize = newBatchMemorySize;
    } else if (newBatchMemorySize < batchMemorySize) {
        adjustNapiExternalMemorySubtract(Env(), batchMemorySize - newBatchMemorySize);
        batchMemorySize = newBatchMemorySize;
    }

    return info.Env().Undefined();
}
|
|
559
|
+
// JS-facing wrapper around disposeBatch(): frees the current batch (no-op when
// none exists).
Napi::Value AddonContext::DisposeBatch(const Napi::CallbackInfo& info) {
    if (disposed) {
        Napi::Error::New(info.Env(), "Context is disposed").ThrowAsJavaScriptException();
        return info.Env().Undefined();
    }

    disposeBatch();

    return info.Env().Undefined();
}
|
|
569
|
+
// Appends tokens to the current batch for one sequence.
//   info[0] — sequence id
//   info[1] — context position of the first token
//   info[2] — Uint32Array of tokens
//   info[3] — whether to request logits for the last token
// Returns the batch index whose logits were requested, or undefined.
//
// Fixes: added the `disposed` guard that every sibling method performs (the
// original only checked `has_batch`, so a call after disposal reported the
// misleading "No batch is initialized"); made the capacity assertion's
// signed/unsigned comparison explicit.
Napi::Value AddonContext::AddToBatch(const Napi::CallbackInfo& info) {
    if (disposed) {
        Napi::Error::New(info.Env(), "Context is disposed").ThrowAsJavaScriptException();
        return info.Env().Undefined();
    }

    if (!has_batch) {
        Napi::Error::New(info.Env(), "No batch is initialized").ThrowAsJavaScriptException();
        return info.Env().Undefined();
    }

    int32_t sequenceId = info[0].As<Napi::Number>().Int32Value();
    int32_t firstTokenContextIndex = info[1].As<Napi::Number>().Int32Value();
    Napi::Uint32Array tokens = info[2].As<Napi::Uint32Array>();
    bool generateLogitAtTheEnd = info[3].As<Napi::Boolean>().Value();

    auto tokensLength = tokens.ElementLength();
    // Must not exceed the capacity the batch was initialized with.
    GGML_ASSERT(batch.n_tokens + static_cast<int32_t>(tokensLength) <= batch_n_tokens);

    for (size_t i = 0; i < tokensLength; i++) {
        llama_batch_add(batch, static_cast<llama_token>(tokens[i]), firstTokenContextIndex + i, { sequenceId }, false);
    }

    if (generateLogitAtTheEnd) {
        // Request logits only for the final appended token.
        batch.logits[batch.n_tokens - 1] = true;

        auto logit_index = batch.n_tokens - 1;

        return Napi::Number::From(info.Env(), logit_index);
    }

    return info.Env().Undefined();
}
|
|
597
|
+
// Drops every KV-cache cell belonging to the given sequence id.
//   info[0] - sequence id (int32)
// Throws a JS error if the context is disposed or the removal fails; returns undefined.
Napi::Value AddonContext::DisposeSequence(const Napi::CallbackInfo& info) {
    const auto env = info.Env();

    if (disposed) {
        Napi::Error::New(env, "Context is disposed").ThrowAsJavaScriptException();
        return env.Undefined();
    }

    const int32_t sequenceId = info[0].As<Napi::Number>().Int32Value();

    // -1/-1 selects the sequence's entire position range.
    if (!llama_kv_cache_seq_rm(ctx, sequenceId, -1, -1)) {
        Napi::Error::New(env, "Failed to dispose sequence").ThrowAsJavaScriptException();
    }

    return env.Undefined();
}
|
|
614
|
+
// Removes the KV-cache cells of one sequence within a position range.
//   info[0] - sequence id, info[1] - start position, info[2] - end position
// Returns a JS boolean: whether llama_kv_cache_seq_rm reported success.
Napi::Value AddonContext::RemoveTokenCellsFromSequence(const Napi::CallbackInfo& info) {
    const auto env = info.Env();

    if (disposed) {
        Napi::Error::New(env, "Context is disposed").ThrowAsJavaScriptException();
        return env.Undefined();
    }

    const auto sequenceId = info[0].As<Napi::Number>().Int32Value();
    const auto startPos = info[1].As<Napi::Number>().Int32Value();
    const auto endPos = info[2].As<Napi::Number>().Int32Value();

    const bool removed = llama_kv_cache_seq_rm(ctx, sequenceId, startPos, endPos);

    return Napi::Boolean::New(env, removed);
}
|
|
628
|
+
// Shifts the positions of one sequence's KV-cache cells by a delta.
//   info[0] - sequence id, info[1] - start position, info[2] - end position,
//   info[3] - signed shift delta
// NOTE(review): whether the range is inclusive or half-open follows
// llama_kv_cache_seq_add's contract — confirm against llama.h.
Napi::Value AddonContext::ShiftSequenceTokenCells(const Napi::CallbackInfo& info) {
    const auto env = info.Env();

    if (disposed) {
        Napi::Error::New(env, "Context is disposed").ThrowAsJavaScriptException();
        return env.Undefined();
    }

    const auto sequenceId = info[0].As<Napi::Number>().Int32Value();
    const auto startPos = info[1].As<Napi::Number>().Int32Value();
    const auto endPos = info[2].As<Napi::Number>().Int32Value();
    const auto shiftDelta = info[3].As<Napi::Number>().Int32Value();

    llama_kv_cache_seq_add(ctx, sequenceId, startPos, endPos, shiftDelta);

    return env.Undefined();
}
|
|
643
|
+
// Decodes the current batch asynchronously; resolves the returned promise when done.
// The AsyncWorker deletes itself after completion (standard node-addon-api lifecycle).
Napi::Value AddonContext::DecodeBatch(const Napi::CallbackInfo& info) {
    auto* decodeWorker = new AddonContextDecodeBatchWorker(info.Env(), this);
    decodeWorker->Queue();

    return decodeWorker->GetPromise();
}
|
|
648
|
+
// Samples the next token asynchronously; resolves the returned promise with the result.
// The AsyncWorker deletes itself after completion (standard node-addon-api lifecycle).
Napi::Value AddonContext::SampleToken(const Napi::CallbackInfo& info) {
    auto* sampleWorker = new AddonContextSampleTokenWorker(info, this);
    sampleWorker->Queue();

    return sampleWorker->GetPromise();
}
|
|
653
|
+
|
|
654
|
+
// Advances a grammar evaluation state by one accepted token.
//   info[0] - wrapped AddonGrammarEvaluationState object
//   info[1] - token id (int32)
// Silently does nothing when the state holds no grammar; returns undefined.
Napi::Value AddonContext::AcceptGrammarEvaluationStateToken(const Napi::CallbackInfo& info) {
    auto* state = Napi::ObjectWrap<AddonGrammarEvaluationState>::Unwrap(info[0].As<Napi::Object>());
    const llama_token tokenId = info[1].As<Napi::Number>().Int32Value();

    if (state->grammar != nullptr) {
        llama_grammar_accept_token(state->grammar, ctx, tokenId);
    }

    return info.Env().Undefined();
}
|
|
665
|
+
|
|
666
|
+
// Checks whether a token is permitted as the next token by a grammar state.
//   info[0] - wrapped AddonGrammarEvaluationState object
//   info[1] - token id (int32)
// Returns false when no grammar is attached, or when the grammar sampler
// rejects the lone candidate (empty result or -INFINITY logit).
Napi::Value AddonContext::CanBeNextTokenForGrammarEvaluationState(const Napi::CallbackInfo& info) {
    auto* state = Napi::ObjectWrap<AddonGrammarEvaluationState>::Unwrap(info[0].As<Napi::Object>());
    const llama_token tokenId = info[1].As<Napi::Number>().Int32Value();

    if (state->grammar == nullptr) {
        return Napi::Boolean::New(info.Env(), false);
    }

    // Build a single-candidate array and let the grammar sampler filter it;
    // the token is viable iff it survives with a finite logit.
    std::vector<llama_token_data> candidates;
    candidates.reserve(1);
    candidates.emplace_back(llama_token_data { tokenId, 1, 0.0f });

    llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };

    llama_grammar_sample(state->grammar, ctx, &candidates_p);

    const bool rejected = candidates_p.size == 0 || candidates_p.data[0].logit == -INFINITY;

    return Napi::Boolean::New(info.Env(), !rejected);
}
|
|
689
|
+
|
|
690
|
+
// Returns the embedding vector for the evaluated input as a Float64Array.
//   info[0] - number of input tokens that were evaluated (int32, must be > 0)
// Prefers the pooled sequence-0 embedding; falls back to the embedding of the
// last input token when no pooled embedding is available. Throws a JS error
// when the context is disposed, the length is invalid, or no embedding exists.
Napi::Value AddonContext::GetEmbedding(const Napi::CallbackInfo& info) {
    if (disposed) {
        Napi::Error::New(info.Env(), "Context is disposed").ThrowAsJavaScriptException();
        return info.Env().Undefined();
    }

    int32_t inputTokensLength = info[0].As<Napi::Number>().Int32Value();

    if (inputTokensLength <= 0) {
        Napi::Error::New(info.Env(), "Invalid input tokens length").ThrowAsJavaScriptException();
        return info.Env().Undefined();
    }

    const int n_embd = llama_n_embd(model->model);
    const auto* embeddings = llama_get_embeddings_seq(ctx, 0);
    if (embeddings == NULL) {
        // No pooled embedding — fall back to the last token's embedding.
        embeddings = llama_get_embeddings_ith(ctx, inputTokensLength - 1);

        if (embeddings == NULL) {
            Napi::Error::New(info.Env(), std::string("Failed to get embeddings for token ") + std::to_string(inputTokensLength - 1)).ThrowAsJavaScriptException();
            return info.Env().Undefined();
        }
    }

    Napi::Float64Array result = Napi::Float64Array::New(info.Env(), n_embd);
    // int index matches the signedness of n_embd (was size_t: signed/unsigned compare).
    for (int i = 0; i < n_embd; ++i) {
        result[i] = embeddings[i];
    }

    return result;
}
|
|
721
|
+
|
|
722
|
+
// Reports the byte size needed to serialize the full context state.
// Throws a JS error when the context is disposed; otherwise returns a number.
Napi::Value AddonContext::GetStateSize(const Napi::CallbackInfo& info) {
    const auto env = info.Env();

    if (disposed) {
        Napi::Error::New(env, "Context is disposed").ThrowAsJavaScriptException();
        return env.Undefined();
    }

    return Napi::Number::From(env, llama_state_get_size(ctx));
}
|
|
730
|
+
|
|
731
|
+
// Prints llama.cpp's timing statistics for this context, then resets the
// counters so the next report covers only subsequent work. Returns undefined.
Napi::Value AddonContext::PrintTimings(const Napi::CallbackInfo& info) {
    const auto env = info.Env();

    llama_print_timings(ctx);
    llama_reset_timings(ctx);

    return env.Undefined();
}
|
|
736
|
+
|
|
737
|
+
// Attaches a LoRA adapter to this context with the given scale.
//   info[0] - wrapped AddonModelLora object
//   info[1] - adapter scale (float)
// Returns undefined; throws a JS error when the context is disposed.
Napi::Value AddonContext::SetLora(const Napi::CallbackInfo& info) {
    // Guard added for consistency with the other methods: calling
    // llama_lora_adapter_set on a freed ctx would be a use-after-free.
    if (disposed) {
        Napi::Error::New(info.Env(), "Context is disposed").ThrowAsJavaScriptException();
        return info.Env().Undefined();
    }

    AddonModelLora* lora = Napi::ObjectWrap<AddonModelLora>::Unwrap(info[0].As<Napi::Object>());
    float scale = info[1].As<Napi::Number>().FloatValue();

    llama_lora_adapter_set(ctx, lora->lora_adapter, scale);

    return info.Env().Undefined();
}
|
|
745
|
+
|
|
746
|
+
// Registers the AddonContext class (and its instance methods) on the addon's exports.
void AddonContext::init(Napi::Object exports) {
    exports.Set(
        "AddonContext",
        DefineClass(
            exports.Env(),
            "AddonContext",
            {
                InstanceMethod("init", &AddonContext::Init),
                InstanceMethod("getContextSize", &AddonContext::GetContextSize),
                InstanceMethod("initBatch", &AddonContext::InitBatch),
                // Was defined but never registered, leaving DisposeBatch unreachable from JS.
                InstanceMethod("disposeBatch", &AddonContext::DisposeBatch),
                InstanceMethod("addToBatch", &AddonContext::AddToBatch),
                InstanceMethod("disposeSequence", &AddonContext::DisposeSequence),
                InstanceMethod("removeTokenCellsFromSequence", &AddonContext::RemoveTokenCellsFromSequence),
                InstanceMethod("shiftSequenceTokenCells", &AddonContext::ShiftSequenceTokenCells),
                InstanceMethod("decodeBatch", &AddonContext::DecodeBatch),
                InstanceMethod("sampleToken", &AddonContext::SampleToken),
                InstanceMethod("acceptGrammarEvaluationStateToken", &AddonContext::AcceptGrammarEvaluationStateToken),
                InstanceMethod("canBeNextTokenForGrammarEvaluationState", &AddonContext::CanBeNextTokenForGrammarEvaluationState),
                InstanceMethod("getEmbedding", &AddonContext::GetEmbedding),
                InstanceMethod("getStateSize", &AddonContext::GetStateSize),
                InstanceMethod("printTimings", &AddonContext::PrintTimings),
                InstanceMethod("setLora", &AddonContext::SetLora),
                InstanceMethod("dispose", &AddonContext::Dispose),
            }
        )
    );
}
|