node-llama-cpp 3.0.0-beta.38 → 3.0.0-beta.39
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bins/linux-arm64/_nlcBuildMetadata.json +1 -1
- package/bins/linux-arm64/libggml.so +0 -0
- package/bins/linux-arm64/libllama.so +0 -0
- package/bins/linux-arm64/llama-addon.node +0 -0
- package/bins/linux-armv7l/_nlcBuildMetadata.json +1 -1
- package/bins/linux-armv7l/libggml.so +0 -0
- package/bins/linux-armv7l/libllama.so +0 -0
- package/bins/linux-armv7l/llama-addon.node +0 -0
- package/bins/linux-x64/_nlcBuildMetadata.json +1 -1
- package/bins/linux-x64/libggml.so +0 -0
- package/bins/linux-x64/libllama.so +0 -0
- package/bins/linux-x64/llama-addon.node +0 -0
- package/bins/linux-x64-vulkan/_nlcBuildMetadata.json +1 -1
- package/bins/linux-x64-vulkan/libggml.so +0 -0
- package/bins/linux-x64-vulkan/libllama.so +0 -0
- package/bins/linux-x64-vulkan/llama-addon.node +0 -0
- package/bins/linux-x64-vulkan/vulkan-shaders-gen +0 -0
- package/bins/mac-arm64-metal/_nlcBuildMetadata.json +1 -1
- package/bins/mac-arm64-metal/ggml-common.h +24 -0
- package/bins/mac-arm64-metal/ggml-metal.metal +181 -552
- package/bins/mac-arm64-metal/libggml.dylib +0 -0
- package/bins/mac-arm64-metal/libllama.dylib +0 -0
- package/bins/mac-arm64-metal/llama-addon.node +0 -0
- package/bins/mac-x64/_nlcBuildMetadata.json +1 -1
- package/bins/mac-x64/libggml.dylib +0 -0
- package/bins/mac-x64/libllama.dylib +0 -0
- package/bins/mac-x64/llama-addon.node +0 -0
- package/bins/win-arm64/_nlcBuildMetadata.json +1 -1
- package/bins/win-arm64/ggml.dll +0 -0
- package/bins/win-arm64/llama-addon.exp +0 -0
- package/bins/win-arm64/llama-addon.lib +0 -0
- package/bins/win-arm64/llama-addon.node +0 -0
- package/bins/win-arm64/llama.dll +0 -0
- package/bins/win-x64/_nlcBuildMetadata.json +1 -1
- package/bins/win-x64/ggml.dll +0 -0
- package/bins/win-x64/llama-addon.node +0 -0
- package/bins/win-x64/llama.dll +0 -0
- package/bins/win-x64-vulkan/_nlcBuildMetadata.json +1 -1
- package/bins/win-x64-vulkan/ggml.dll +0 -0
- package/bins/win-x64-vulkan/llama-addon.node +0 -0
- package/bins/win-x64-vulkan/llama.dll +0 -0
- package/bins/win-x64-vulkan/vulkan-shaders-gen.exe +0 -0
- package/dist/ChatWrapper.d.ts +2 -1
- package/dist/ChatWrapper.js +19 -5
- package/dist/ChatWrapper.js.map +1 -1
- package/dist/bindings/AddonTypes.d.ts +13 -2
- package/dist/bindings/getLlama.d.ts +3 -2
- package/dist/bindings/getLlama.js +1 -1
- package/dist/bindings/getLlama.js.map +1 -1
- package/dist/chatWrappers/FunctionaryChatWrapper.js +8 -5
- package/dist/chatWrappers/FunctionaryChatWrapper.js.map +1 -1
- package/dist/chatWrappers/GemmaChatWrapper.js +1 -1
- package/dist/chatWrappers/GemmaChatWrapper.js.map +1 -1
- package/dist/chatWrappers/Llama3ChatWrapper.js +3 -4
- package/dist/chatWrappers/Llama3ChatWrapper.js.map +1 -1
- package/dist/chatWrappers/Llama3_1ChatWrapper.d.ts +31 -0
- package/dist/chatWrappers/Llama3_1ChatWrapper.js +223 -0
- package/dist/chatWrappers/Llama3_1ChatWrapper.js.map +1 -0
- package/dist/chatWrappers/utils/ChatModelFunctionsDocumentationGenerator.d.ts +17 -2
- package/dist/chatWrappers/utils/ChatModelFunctionsDocumentationGenerator.js +39 -2
- package/dist/chatWrappers/utils/ChatModelFunctionsDocumentationGenerator.js.map +1 -1
- package/dist/chatWrappers/utils/jsonDumps.d.ts +7 -0
- package/dist/chatWrappers/utils/jsonDumps.js +18 -0
- package/dist/chatWrappers/utils/jsonDumps.js.map +1 -0
- package/dist/chatWrappers/utils/resolveChatWrapper.d.ts +5 -3
- package/dist/chatWrappers/utils/resolveChatWrapper.js +50 -4
- package/dist/chatWrappers/utils/resolveChatWrapper.js.map +1 -1
- package/dist/cli/commands/ChatCommand.d.ts +1 -1
- package/dist/cli/commands/ChatCommand.js +2 -4
- package/dist/cli/commands/ChatCommand.js.map +1 -1
- package/dist/cli/commands/CompleteCommand.js +2 -2
- package/dist/cli/commands/CompleteCommand.js.map +1 -1
- package/dist/cli/commands/InfillCommand.js +2 -2
- package/dist/cli/commands/InfillCommand.js.map +1 -1
- package/dist/cli/recommendedModels.js +43 -24
- package/dist/cli/recommendedModels.js.map +1 -1
- package/dist/consts.d.ts +1 -0
- package/dist/consts.js +1 -0
- package/dist/consts.js.map +1 -1
- package/dist/evaluator/LlamaChat/LlamaChat.d.ts +22 -0
- package/dist/evaluator/LlamaChat/LlamaChat.js +65 -34
- package/dist/evaluator/LlamaChat/LlamaChat.js.map +1 -1
- package/dist/evaluator/LlamaChatSession/LlamaChatSession.d.ts +28 -6
- package/dist/evaluator/LlamaChatSession/LlamaChatSession.js +22 -16
- package/dist/evaluator/LlamaChatSession/LlamaChatSession.js.map +1 -1
- package/dist/evaluator/LlamaChatSession/utils/LlamaChatSessionPromptCompletionEngine.js +4 -5
- package/dist/evaluator/LlamaChatSession/utils/LlamaChatSessionPromptCompletionEngine.js.map +1 -1
- package/dist/evaluator/LlamaCompletion.d.ts +13 -2
- package/dist/evaluator/LlamaCompletion.js +10 -5
- package/dist/evaluator/LlamaCompletion.js.map +1 -1
- package/dist/evaluator/LlamaContext/LlamaContext.d.ts +1 -1
- package/dist/evaluator/LlamaContext/LlamaContext.js +60 -0
- package/dist/evaluator/LlamaContext/LlamaContext.js.map +1 -1
- package/dist/evaluator/LlamaContext/types.d.ts +21 -0
- package/dist/evaluator/LlamaGrammar.d.ts +6 -3
- package/dist/evaluator/LlamaGrammar.js +2 -2
- package/dist/evaluator/LlamaGrammar.js.map +1 -1
- package/dist/evaluator/LlamaModel/LlamaModel.d.ts +16 -32
- package/dist/evaluator/LlamaModel/LlamaModel.js +94 -53
- package/dist/evaluator/LlamaModel/LlamaModel.js.map +1 -1
- package/dist/gguf/consts.d.ts +1 -0
- package/dist/gguf/consts.js +4 -0
- package/dist/gguf/consts.js.map +1 -1
- package/dist/gguf/insights/GgufInsights.js +4 -0
- package/dist/gguf/insights/GgufInsights.js.map +1 -1
- package/dist/gguf/parser/GgufV2Parser.js +3 -1
- package/dist/gguf/parser/GgufV2Parser.js.map +1 -1
- package/dist/gguf/types/GgufMetadataTypes.d.ts +16 -0
- package/dist/gguf/types/GgufMetadataTypes.js.map +1 -1
- package/dist/gguf/utils/convertMetadataKeyValueRecordToNestedObject.d.ts +3 -2
- package/dist/gguf/utils/convertMetadataKeyValueRecordToNestedObject.js +44 -8
- package/dist/gguf/utils/convertMetadataKeyValueRecordToNestedObject.js.map +1 -1
- package/dist/index.d.ts +4 -2
- package/dist/index.js +3 -1
- package/dist/index.js.map +1 -1
- package/dist/types.d.ts +15 -1
- package/dist/types.js.map +1 -1
- package/dist/utils/DeepPartialObject.d.ts +3 -0
- package/dist/utils/DeepPartialObject.js +2 -0
- package/dist/utils/DeepPartialObject.js.map +1 -0
- package/dist/utils/StopGenerationDetector.d.ts +6 -3
- package/dist/utils/StopGenerationDetector.js +22 -7
- package/dist/utils/StopGenerationDetector.js.map +1 -1
- package/dist/utils/TokenStreamRegulator.d.ts +1 -0
- package/dist/utils/TokenStreamRegulator.js +23 -5
- package/dist/utils/TokenStreamRegulator.js.map +1 -1
- package/dist/utils/resolveLastTokens.d.ts +2 -0
- package/dist/utils/resolveLastTokens.js +12 -0
- package/dist/utils/resolveLastTokens.js.map +1 -0
- package/llama/CMakeLists.txt +1 -1
- package/llama/addon/AddonContext.cpp +772 -0
- package/llama/addon/AddonContext.h +53 -0
- package/llama/addon/AddonGrammar.cpp +44 -0
- package/llama/addon/AddonGrammar.h +18 -0
- package/llama/addon/AddonGrammarEvaluationState.cpp +28 -0
- package/llama/addon/AddonGrammarEvaluationState.h +15 -0
- package/llama/addon/AddonModel.cpp +681 -0
- package/llama/addon/AddonModel.h +61 -0
- package/llama/addon/AddonModelData.cpp +25 -0
- package/llama/addon/AddonModelData.h +15 -0
- package/llama/addon/AddonModelLora.cpp +107 -0
- package/llama/addon/AddonModelLora.h +28 -0
- package/llama/addon/addon.cpp +217 -0
- package/llama/addon/addonGlobals.cpp +22 -0
- package/llama/addon/addonGlobals.h +12 -0
- package/llama/addon/globals/addonLog.cpp +135 -0
- package/llama/addon/globals/addonLog.h +21 -0
- package/llama/addon/globals/addonProgress.cpp +15 -0
- package/llama/addon/globals/addonProgress.h +15 -0
- package/llama/addon/globals/getGpuInfo.cpp +108 -0
- package/llama/addon/globals/getGpuInfo.h +6 -0
- package/llama/binariesGithubRelease.json +1 -1
- package/llama/gitRelease.bundle +0 -0
- package/llama/grammars/README.md +1 -1
- package/llama/llama.cpp.info.json +1 -1
- package/package.json +3 -3
- package/templates/packed/electron-typescript-react.json +1 -1
- package/templates/packed/node-typescript.json +1 -1
- package/llama/addon.cpp +0 -1997
package/llama/addon.cpp
DELETED
|
@@ -1,1997 +0,0 @@
|
|
|
1
|
-
#include <stddef.h>
|
|
2
|
-
|
|
3
|
-
#include <algorithm>
|
|
4
|
-
#include <sstream>
|
|
5
|
-
#include <vector>
|
|
6
|
-
#include <unordered_map>
|
|
7
|
-
|
|
8
|
-
#include "common.h"
|
|
9
|
-
#include "common/grammar-parser.h"
|
|
10
|
-
#include "llama.h"
|
|
11
|
-
#include "napi.h"
|
|
12
|
-
|
|
13
|
-
#ifdef GPU_INFO_USE_CUDA
|
|
14
|
-
# include "gpuInfo/cuda-gpu-info.h"
|
|
15
|
-
#endif
|
|
16
|
-
#ifdef GPU_INFO_USE_VULKAN
|
|
17
|
-
# include "gpuInfo/vulkan-gpu-info.h"
|
|
18
|
-
#endif
|
|
19
|
-
#ifdef GPU_INFO_USE_METAL
|
|
20
|
-
# include "gpuInfo/metal-gpu-info.h"
|
|
21
|
-
#endif
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
struct addon_logger_log {
|
|
25
|
-
public:
|
|
26
|
-
const int logLevelNumber;
|
|
27
|
-
const std::stringstream* stringStream;
|
|
28
|
-
};
|
|
29
|
-
|
|
30
|
-
static void addonLlamaCppLogCallback(ggml_log_level level, const char* text, void* user_data);
|
|
31
|
-
|
|
32
|
-
using AddonThreadSafeLogCallbackFunctionContext = Napi::Reference<Napi::Value>;
|
|
33
|
-
void addonCallJsLogCallback(
|
|
34
|
-
Napi::Env env, Napi::Function callback, AddonThreadSafeLogCallbackFunctionContext* context, addon_logger_log* data
|
|
35
|
-
);
|
|
36
|
-
using AddonThreadSafeLogCallbackFunction =
|
|
37
|
-
Napi::TypedThreadSafeFunction<AddonThreadSafeLogCallbackFunctionContext, addon_logger_log, addonCallJsLogCallback>;
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
struct addon_progress_event {
|
|
41
|
-
public:
|
|
42
|
-
const float progress;
|
|
43
|
-
};
|
|
44
|
-
|
|
45
|
-
using AddonThreadSafeProgressCallbackFunctionContext = Napi::Reference<Napi::Value>;
|
|
46
|
-
void addonCallJsProgressCallback(
|
|
47
|
-
Napi::Env env, Napi::Function callback, AddonThreadSafeProgressCallbackFunctionContext* context, addon_progress_event* data
|
|
48
|
-
);
|
|
49
|
-
using AddonThreadSafeProgressEventCallbackFunction =
|
|
50
|
-
Napi::TypedThreadSafeFunction<AddonThreadSafeProgressCallbackFunctionContext, addon_progress_event, addonCallJsProgressCallback>;
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
AddonThreadSafeLogCallbackFunction addonThreadSafeLoggerCallback;
|
|
54
|
-
bool addonJsLoggerCallbackSet = false;
|
|
55
|
-
int addonLoggerLogLevel = 5;
|
|
56
|
-
bool backendInitialized = false;
|
|
57
|
-
bool backendDisposed = false;
|
|
58
|
-
|
|
59
|
-
void addonCallJsProgressCallback(
|
|
60
|
-
Napi::Env env, Napi::Function callback, AddonThreadSafeProgressCallbackFunctionContext* context, addon_progress_event* data
|
|
61
|
-
) {
|
|
62
|
-
if (env != nullptr && callback != nullptr && addonJsLoggerCallbackSet) {
|
|
63
|
-
try {
|
|
64
|
-
callback.Call({Napi::Number::New(env, data->progress)});
|
|
65
|
-
} catch (const Napi::Error& e) {}
|
|
66
|
-
}
|
|
67
|
-
|
|
68
|
-
if (data != nullptr) {
|
|
69
|
-
delete data;
|
|
70
|
-
}
|
|
71
|
-
}
|
|
72
|
-
|
|
73
|
-
static uint64_t calculateBatchMemorySize(int32_t n_tokens_alloc, int32_t embd, int32_t n_seq_max) {
|
|
74
|
-
uint64_t totalSize = 0;
|
|
75
|
-
|
|
76
|
-
if (embd) {
|
|
77
|
-
totalSize += sizeof(float) * n_tokens_alloc * embd;
|
|
78
|
-
} else {
|
|
79
|
-
totalSize += sizeof(llama_token) * n_tokens_alloc;
|
|
80
|
-
}
|
|
81
|
-
|
|
82
|
-
totalSize += sizeof(llama_pos) * n_tokens_alloc;
|
|
83
|
-
totalSize += sizeof(int32_t) * n_tokens_alloc;
|
|
84
|
-
totalSize += sizeof(llama_seq_id *) * (n_tokens_alloc + 1);
|
|
85
|
-
|
|
86
|
-
totalSize += sizeof(llama_seq_id) * n_seq_max * n_tokens_alloc;
|
|
87
|
-
|
|
88
|
-
totalSize += sizeof(int8_t) * n_tokens_alloc;
|
|
89
|
-
|
|
90
|
-
return totalSize;
|
|
91
|
-
}
|
|
92
|
-
|
|
93
|
-
static void adjustNapiExternalMemoryAdd(Napi::Env env, uint64_t size) {
|
|
94
|
-
const uint64_t chunkSize = std::numeric_limits<int64_t>::max();
|
|
95
|
-
while (size > 0) {
|
|
96
|
-
int64_t adjustSize = std::min(size, chunkSize);
|
|
97
|
-
Napi::MemoryManagement::AdjustExternalMemory(env, adjustSize);
|
|
98
|
-
size -= adjustSize;
|
|
99
|
-
}
|
|
100
|
-
}
|
|
101
|
-
|
|
102
|
-
static void adjustNapiExternalMemorySubtract(Napi::Env env, uint64_t size) {
|
|
103
|
-
const uint64_t chunkSize = std::numeric_limits<int64_t>::max();
|
|
104
|
-
while (size > 0) {
|
|
105
|
-
int64_t adjustSize = std::min(size, chunkSize);
|
|
106
|
-
Napi::MemoryManagement::AdjustExternalMemory(env, -adjustSize);
|
|
107
|
-
size -= adjustSize;
|
|
108
|
-
}
|
|
109
|
-
}
|
|
110
|
-
|
|
111
|
-
#ifdef GPU_INFO_USE_CUDA
|
|
112
|
-
void logCudaError(const char* message) {
|
|
113
|
-
addonLlamaCppLogCallback(GGML_LOG_LEVEL_ERROR, (std::string("CUDA error: ") + std::string(message)).c_str(), nullptr);
|
|
114
|
-
}
|
|
115
|
-
#endif
|
|
116
|
-
#ifdef GPU_INFO_USE_VULKAN
|
|
117
|
-
void logVulkanWarning(const char* message) {
|
|
118
|
-
addonLlamaCppLogCallback(GGML_LOG_LEVEL_WARN, (std::string("Vulkan warning: ") + std::string(message)).c_str(), nullptr);
|
|
119
|
-
}
|
|
120
|
-
#endif
|
|
121
|
-
|
|
122
|
-
Napi::Value getGpuVramInfo(const Napi::CallbackInfo& info) {
|
|
123
|
-
uint64_t total = 0;
|
|
124
|
-
uint64_t used = 0;
|
|
125
|
-
|
|
126
|
-
#ifdef GPU_INFO_USE_CUDA
|
|
127
|
-
size_t cudaDeviceTotal = 0;
|
|
128
|
-
size_t cudaDeviceUsed = 0;
|
|
129
|
-
bool cudeGetInfoSuccess = gpuInfoGetTotalCudaDevicesInfo(&cudaDeviceTotal, &cudaDeviceUsed, logCudaError);
|
|
130
|
-
|
|
131
|
-
if (cudeGetInfoSuccess) {
|
|
132
|
-
total += cudaDeviceTotal;
|
|
133
|
-
used += cudaDeviceUsed;
|
|
134
|
-
}
|
|
135
|
-
#endif
|
|
136
|
-
|
|
137
|
-
#ifdef GPU_INFO_USE_VULKAN
|
|
138
|
-
uint64_t vulkanDeviceTotal = 0;
|
|
139
|
-
uint64_t vulkanDeviceUsed = 0;
|
|
140
|
-
const bool vulkanDeviceSupportsMemoryBudgetExtension = gpuInfoGetTotalVulkanDevicesInfo(&vulkanDeviceTotal, &vulkanDeviceUsed, logVulkanWarning);
|
|
141
|
-
|
|
142
|
-
if (vulkanDeviceSupportsMemoryBudgetExtension) {
|
|
143
|
-
total += vulkanDeviceTotal;
|
|
144
|
-
used += vulkanDeviceUsed;
|
|
145
|
-
}
|
|
146
|
-
#endif
|
|
147
|
-
|
|
148
|
-
#ifdef GPU_INFO_USE_METAL
|
|
149
|
-
uint64_t metalDeviceTotal = 0;
|
|
150
|
-
uint64_t metalDeviceUsed = 0;
|
|
151
|
-
getMetalGpuInfo(&metalDeviceTotal, &metalDeviceUsed);
|
|
152
|
-
|
|
153
|
-
total += metalDeviceTotal;
|
|
154
|
-
used += metalDeviceUsed;
|
|
155
|
-
#endif
|
|
156
|
-
|
|
157
|
-
Napi::Object result = Napi::Object::New(info.Env());
|
|
158
|
-
result.Set("total", Napi::Number::From(info.Env(), total));
|
|
159
|
-
result.Set("used", Napi::Number::From(info.Env(), used));
|
|
160
|
-
|
|
161
|
-
return result;
|
|
162
|
-
}
|
|
163
|
-
|
|
164
|
-
Napi::Value getGpuDeviceInfo(const Napi::CallbackInfo& info) {
|
|
165
|
-
std::vector<std::string> deviceNames;
|
|
166
|
-
|
|
167
|
-
#ifdef GPU_INFO_USE_CUDA
|
|
168
|
-
gpuInfoGetCudaDeviceNames(&deviceNames, logCudaError);
|
|
169
|
-
#endif
|
|
170
|
-
|
|
171
|
-
#ifdef GPU_INFO_USE_VULKAN
|
|
172
|
-
gpuInfoGetVulkanDeviceNames(&deviceNames, logVulkanWarning);
|
|
173
|
-
#endif
|
|
174
|
-
|
|
175
|
-
#ifdef GPU_INFO_USE_METAL
|
|
176
|
-
getMetalGpuDeviceNames(&deviceNames);
|
|
177
|
-
#endif
|
|
178
|
-
|
|
179
|
-
Napi::Object result = Napi::Object::New(info.Env());
|
|
180
|
-
|
|
181
|
-
Napi::Array deviceNamesNapiArray = Napi::Array::New(info.Env(), deviceNames.size());
|
|
182
|
-
for (size_t i = 0; i < deviceNames.size(); ++i) {
|
|
183
|
-
deviceNamesNapiArray[i] = Napi::String::New(info.Env(), deviceNames[i]);
|
|
184
|
-
}
|
|
185
|
-
result.Set("deviceNames", deviceNamesNapiArray);
|
|
186
|
-
|
|
187
|
-
return result;
|
|
188
|
-
}
|
|
189
|
-
|
|
190
|
-
Napi::Value getGpuType(const Napi::CallbackInfo& info) {
|
|
191
|
-
#ifdef GPU_INFO_USE_CUDA
|
|
192
|
-
return Napi::String::New(info.Env(), "cuda");
|
|
193
|
-
#endif
|
|
194
|
-
|
|
195
|
-
#ifdef GPU_INFO_USE_VULKAN
|
|
196
|
-
return Napi::String::New(info.Env(), "vulkan");
|
|
197
|
-
#endif
|
|
198
|
-
|
|
199
|
-
#ifdef GPU_INFO_USE_METAL
|
|
200
|
-
return Napi::String::New(info.Env(), "metal");
|
|
201
|
-
#endif
|
|
202
|
-
|
|
203
|
-
return info.Env().Undefined();
|
|
204
|
-
}
|
|
205
|
-
|
|
206
|
-
static Napi::Value getNapiToken(const Napi::CallbackInfo& info, llama_model* model, llama_token token) {
|
|
207
|
-
if (token < 0) {
|
|
208
|
-
return Napi::Number::From(info.Env(), -1);
|
|
209
|
-
}
|
|
210
|
-
|
|
211
|
-
auto tokenAttributes = llama_token_get_attr(model, token);
|
|
212
|
-
|
|
213
|
-
if (tokenAttributes & LLAMA_TOKEN_ATTR_UNDEFINED || tokenAttributes & LLAMA_TOKEN_ATTR_UNKNOWN) {
|
|
214
|
-
return Napi::Number::From(info.Env(), -1);
|
|
215
|
-
}
|
|
216
|
-
|
|
217
|
-
return Napi::Number::From(info.Env(), token);
|
|
218
|
-
}
|
|
219
|
-
|
|
220
|
-
static Napi::Value getNapiControlToken(const Napi::CallbackInfo& info, llama_model* model, llama_token token) {
|
|
221
|
-
if (token < 0) {
|
|
222
|
-
return Napi::Number::From(info.Env(), -1);
|
|
223
|
-
}
|
|
224
|
-
|
|
225
|
-
auto tokenAttributes = llama_token_get_attr(model, token);
|
|
226
|
-
|
|
227
|
-
if (!(tokenAttributes & LLAMA_TOKEN_ATTR_CONTROL) && !(tokenAttributes & LLAMA_TOKEN_ATTR_UNDEFINED)) {
|
|
228
|
-
return Napi::Number::From(info.Env(), -1);
|
|
229
|
-
}
|
|
230
|
-
|
|
231
|
-
return Napi::Number::From(info.Env(), token);
|
|
232
|
-
}
|
|
233
|
-
|
|
234
|
-
static bool llamaModelParamsProgressCallback(float progress, void * user_data);
|
|
235
|
-
|
|
236
|
-
class AddonModel : public Napi::ObjectWrap<AddonModel> {
|
|
237
|
-
public:
|
|
238
|
-
llama_model_params model_params;
|
|
239
|
-
llama_model* model;
|
|
240
|
-
uint64_t loadedModelSize = 0;
|
|
241
|
-
Napi::Reference<Napi::Object> addonExportsRef;
|
|
242
|
-
bool hasAddonExportsRef = false;
|
|
243
|
-
|
|
244
|
-
std::string modelPath;
|
|
245
|
-
bool modelLoaded = false;
|
|
246
|
-
bool abortModelLoad = false;
|
|
247
|
-
bool model_load_stopped = false;
|
|
248
|
-
float rawModelLoadPercentage = 0;
|
|
249
|
-
unsigned modelLoadPercentage = 0;
|
|
250
|
-
AddonThreadSafeProgressEventCallbackFunction addonThreadSafeOnLoadProgressEventCallback;
|
|
251
|
-
bool onLoadProgressEventCallbackSet = false;
|
|
252
|
-
bool hasLoadAbortSignal = false;
|
|
253
|
-
|
|
254
|
-
bool disposed = false;
|
|
255
|
-
|
|
256
|
-
AddonModel(const Napi::CallbackInfo& info) : Napi::ObjectWrap<AddonModel>(info) {
|
|
257
|
-
model_params = llama_model_default_params();
|
|
258
|
-
|
|
259
|
-
// Get the model path
|
|
260
|
-
modelPath = info[0].As<Napi::String>().Utf8Value();
|
|
261
|
-
|
|
262
|
-
if (info.Length() > 1 && info[1].IsObject()) {
|
|
263
|
-
Napi::Object options = info[1].As<Napi::Object>();
|
|
264
|
-
|
|
265
|
-
if (options.Has("addonExports")) {
|
|
266
|
-
addonExportsRef = Napi::Persistent(options.Get("addonExports").As<Napi::Object>());
|
|
267
|
-
hasAddonExportsRef = true;
|
|
268
|
-
}
|
|
269
|
-
|
|
270
|
-
if (options.Has("gpuLayers")) {
|
|
271
|
-
model_params.n_gpu_layers = options.Get("gpuLayers").As<Napi::Number>().Int32Value();
|
|
272
|
-
}
|
|
273
|
-
|
|
274
|
-
if (options.Has("vocabOnly")) {
|
|
275
|
-
model_params.vocab_only = options.Get("vocabOnly").As<Napi::Boolean>().Value();
|
|
276
|
-
}
|
|
277
|
-
|
|
278
|
-
if (options.Has("useMmap")) {
|
|
279
|
-
model_params.use_mmap = options.Get("useMmap").As<Napi::Boolean>().Value();
|
|
280
|
-
}
|
|
281
|
-
|
|
282
|
-
if (options.Has("useMlock")) {
|
|
283
|
-
model_params.use_mlock = options.Get("useMlock").As<Napi::Boolean>().Value();
|
|
284
|
-
}
|
|
285
|
-
|
|
286
|
-
if (options.Has("checkTensors")) {
|
|
287
|
-
model_params.check_tensors = options.Get("checkTensors").As<Napi::Boolean>().Value();
|
|
288
|
-
}
|
|
289
|
-
|
|
290
|
-
if (options.Has("onLoadProgress")) {
|
|
291
|
-
auto onLoadProgressJSCallback = options.Get("onLoadProgress").As<Napi::Function>();
|
|
292
|
-
if (onLoadProgressJSCallback.IsFunction()) {
|
|
293
|
-
AddonThreadSafeProgressCallbackFunctionContext* context = new Napi::Reference<Napi::Value>(Napi::Persistent(info.This()));
|
|
294
|
-
addonThreadSafeOnLoadProgressEventCallback = AddonThreadSafeProgressEventCallbackFunction::New(
|
|
295
|
-
info.Env(),
|
|
296
|
-
onLoadProgressJSCallback,
|
|
297
|
-
"onLoadProgressCallback",
|
|
298
|
-
0,
|
|
299
|
-
1,
|
|
300
|
-
context,
|
|
301
|
-
[](Napi::Env, AddonModel* addonModel, AddonThreadSafeProgressCallbackFunctionContext* ctx) {
|
|
302
|
-
addonModel->onLoadProgressEventCallbackSet = false;
|
|
303
|
-
|
|
304
|
-
delete ctx;
|
|
305
|
-
},
|
|
306
|
-
this
|
|
307
|
-
);
|
|
308
|
-
onLoadProgressEventCallbackSet = true;
|
|
309
|
-
}
|
|
310
|
-
}
|
|
311
|
-
|
|
312
|
-
if (options.Has("hasLoadAbortSignal")) {
|
|
313
|
-
hasLoadAbortSignal = options.Get("hasLoadAbortSignal").As<Napi::Boolean>().Value();
|
|
314
|
-
}
|
|
315
|
-
|
|
316
|
-
if (onLoadProgressEventCallbackSet || hasLoadAbortSignal) {
|
|
317
|
-
model_params.progress_callback_user_data = &(*this);
|
|
318
|
-
model_params.progress_callback = llamaModelParamsProgressCallback;
|
|
319
|
-
}
|
|
320
|
-
}
|
|
321
|
-
}
|
|
322
|
-
|
|
323
|
-
~AddonModel() {
|
|
324
|
-
dispose();
|
|
325
|
-
}
|
|
326
|
-
|
|
327
|
-
void dispose() {
|
|
328
|
-
if (disposed) {
|
|
329
|
-
return;
|
|
330
|
-
}
|
|
331
|
-
|
|
332
|
-
disposed = true;
|
|
333
|
-
if (modelLoaded) {
|
|
334
|
-
modelLoaded = false;
|
|
335
|
-
llama_free_model(model);
|
|
336
|
-
|
|
337
|
-
adjustNapiExternalMemorySubtract(Env(), loadedModelSize);
|
|
338
|
-
loadedModelSize = 0;
|
|
339
|
-
}
|
|
340
|
-
|
|
341
|
-
if (hasAddonExportsRef) {
|
|
342
|
-
addonExportsRef.Unref();
|
|
343
|
-
hasAddonExportsRef = false;
|
|
344
|
-
}
|
|
345
|
-
}
|
|
346
|
-
|
|
347
|
-
Napi::Value Init(const Napi::CallbackInfo& info);
|
|
348
|
-
Napi::Value LoadLora(const Napi::CallbackInfo& info);
|
|
349
|
-
Napi::Value AbortActiveModelLoad(const Napi::CallbackInfo& info) {
|
|
350
|
-
abortModelLoad = true;
|
|
351
|
-
return info.Env().Undefined();
|
|
352
|
-
}
|
|
353
|
-
Napi::Value Dispose(const Napi::CallbackInfo& info);
|
|
354
|
-
|
|
355
|
-
Napi::Value Tokenize(const Napi::CallbackInfo& info) {
|
|
356
|
-
if (disposed) {
|
|
357
|
-
Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
|
|
358
|
-
return info.Env().Undefined();
|
|
359
|
-
}
|
|
360
|
-
|
|
361
|
-
std::string text = info[0].As<Napi::String>().Utf8Value();
|
|
362
|
-
bool specialTokens = info[1].As<Napi::Boolean>().Value();
|
|
363
|
-
|
|
364
|
-
std::vector<llama_token> tokens = llama_tokenize(model, text, false, specialTokens);
|
|
365
|
-
|
|
366
|
-
Napi::Uint32Array result = Napi::Uint32Array::New(info.Env(), tokens.size());
|
|
367
|
-
for (size_t i = 0; i < tokens.size(); ++i) {
|
|
368
|
-
result[i] = static_cast<uint32_t>(tokens[i]);
|
|
369
|
-
}
|
|
370
|
-
|
|
371
|
-
return result;
|
|
372
|
-
}
|
|
373
|
-
Napi::Value Detokenize(const Napi::CallbackInfo& info) {
|
|
374
|
-
if (disposed) {
|
|
375
|
-
Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
|
|
376
|
-
return info.Env().Undefined();
|
|
377
|
-
}
|
|
378
|
-
|
|
379
|
-
Napi::Uint32Array tokens = info[0].As<Napi::Uint32Array>();
|
|
380
|
-
bool decodeSpecialTokens = info.Length() > 0
|
|
381
|
-
? info[1].As<Napi::Boolean>().Value()
|
|
382
|
-
: false;
|
|
383
|
-
|
|
384
|
-
std::vector<char> result(8, 0);
|
|
385
|
-
const int n_length = llama_detokenize(model, (llama_token*)tokens.Data(), tokens.ElementLength(), result.data(), result.size(), false, decodeSpecialTokens);
|
|
386
|
-
|
|
387
|
-
if (n_length < 0) {
|
|
388
|
-
result.resize(-n_length);
|
|
389
|
-
int check = llama_detokenize(model, (llama_token*)tokens.Data(), tokens.ElementLength(), result.data(), result.size(), false, decodeSpecialTokens);
|
|
390
|
-
GGML_ASSERT(check == -n_length);
|
|
391
|
-
} else {
|
|
392
|
-
result.resize(n_length);
|
|
393
|
-
}
|
|
394
|
-
|
|
395
|
-
return Napi::String::New(info.Env(), result.data(), result.size());
|
|
396
|
-
}
|
|
397
|
-
|
|
398
|
-
Napi::Value GetTrainContextSize(const Napi::CallbackInfo& info) {
|
|
399
|
-
if (disposed) {
|
|
400
|
-
Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
|
|
401
|
-
return info.Env().Undefined();
|
|
402
|
-
}
|
|
403
|
-
|
|
404
|
-
return Napi::Number::From(info.Env(), llama_n_ctx_train(model));
|
|
405
|
-
}
|
|
406
|
-
|
|
407
|
-
Napi::Value GetEmbeddingVectorSize(const Napi::CallbackInfo& info) {
|
|
408
|
-
if (disposed) {
|
|
409
|
-
Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
|
|
410
|
-
return info.Env().Undefined();
|
|
411
|
-
}
|
|
412
|
-
|
|
413
|
-
return Napi::Number::From(info.Env(), llama_n_embd(model));
|
|
414
|
-
}
|
|
415
|
-
|
|
416
|
-
Napi::Value GetTotalSize(const Napi::CallbackInfo& info) {
|
|
417
|
-
if (disposed) {
|
|
418
|
-
Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
|
|
419
|
-
return info.Env().Undefined();
|
|
420
|
-
}
|
|
421
|
-
|
|
422
|
-
return Napi::Number::From(info.Env(), llama_model_size(model));
|
|
423
|
-
}
|
|
424
|
-
|
|
425
|
-
Napi::Value GetTotalParameters(const Napi::CallbackInfo& info) {
|
|
426
|
-
if (disposed) {
|
|
427
|
-
Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
|
|
428
|
-
return info.Env().Undefined();
|
|
429
|
-
}
|
|
430
|
-
|
|
431
|
-
return Napi::Number::From(info.Env(), llama_model_n_params(model));
|
|
432
|
-
}
|
|
433
|
-
|
|
434
|
-
Napi::Value GetModelDescription(const Napi::CallbackInfo& info) {
|
|
435
|
-
if (disposed) {
|
|
436
|
-
Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
|
|
437
|
-
return info.Env().Undefined();
|
|
438
|
-
}
|
|
439
|
-
|
|
440
|
-
char model_desc[128];
|
|
441
|
-
int actual_length = llama_model_desc(model, model_desc, sizeof(model_desc));
|
|
442
|
-
|
|
443
|
-
return Napi::String::New(info.Env(), model_desc, actual_length);
|
|
444
|
-
}
|
|
445
|
-
|
|
446
|
-
Napi::Value TokenBos(const Napi::CallbackInfo& info) {
|
|
447
|
-
if (disposed) {
|
|
448
|
-
Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
|
|
449
|
-
return info.Env().Undefined();
|
|
450
|
-
}
|
|
451
|
-
|
|
452
|
-
return getNapiControlToken(info, model, llama_token_bos(model));
|
|
453
|
-
}
|
|
454
|
-
Napi::Value TokenEos(const Napi::CallbackInfo& info) {
|
|
455
|
-
if (disposed) {
|
|
456
|
-
Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
|
|
457
|
-
return info.Env().Undefined();
|
|
458
|
-
}
|
|
459
|
-
|
|
460
|
-
return getNapiControlToken(info, model, llama_token_eos(model));
|
|
461
|
-
}
|
|
462
|
-
Napi::Value TokenNl(const Napi::CallbackInfo& info) {
|
|
463
|
-
if (disposed) {
|
|
464
|
-
Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
|
|
465
|
-
return info.Env().Undefined();
|
|
466
|
-
}
|
|
467
|
-
|
|
468
|
-
return getNapiToken(info, model, llama_token_nl(model));
|
|
469
|
-
}
|
|
470
|
-
Napi::Value PrefixToken(const Napi::CallbackInfo& info) {
|
|
471
|
-
if (disposed) {
|
|
472
|
-
Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
|
|
473
|
-
return info.Env().Undefined();
|
|
474
|
-
}
|
|
475
|
-
|
|
476
|
-
return getNapiControlToken(info, model, llama_token_prefix(model));
|
|
477
|
-
}
|
|
478
|
-
Napi::Value MiddleToken(const Napi::CallbackInfo& info) {
|
|
479
|
-
if (disposed) {
|
|
480
|
-
Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
|
|
481
|
-
return info.Env().Undefined();
|
|
482
|
-
}
|
|
483
|
-
|
|
484
|
-
return getNapiControlToken(info, model, llama_token_middle(model));
|
|
485
|
-
}
|
|
486
|
-
Napi::Value SuffixToken(const Napi::CallbackInfo& info) {
|
|
487
|
-
if (disposed) {
|
|
488
|
-
Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
|
|
489
|
-
return info.Env().Undefined();
|
|
490
|
-
}
|
|
491
|
-
|
|
492
|
-
return getNapiControlToken(info, model, llama_token_suffix(model));
|
|
493
|
-
}
|
|
494
|
-
Napi::Value EotToken(const Napi::CallbackInfo& info) {
|
|
495
|
-
if (disposed) {
|
|
496
|
-
Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
|
|
497
|
-
return info.Env().Undefined();
|
|
498
|
-
}
|
|
499
|
-
|
|
500
|
-
return getNapiControlToken(info, model, llama_token_eot(model));
|
|
501
|
-
}
|
|
502
|
-
Napi::Value GetTokenString(const Napi::CallbackInfo& info) {
|
|
503
|
-
if (disposed) {
|
|
504
|
-
Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
|
|
505
|
-
return info.Env().Undefined();
|
|
506
|
-
}
|
|
507
|
-
|
|
508
|
-
int token = info[0].As<Napi::Number>().Int32Value();
|
|
509
|
-
std::stringstream ss;
|
|
510
|
-
|
|
511
|
-
const char* str = llama_token_get_text(model, token);
|
|
512
|
-
if (str == nullptr) {
|
|
513
|
-
return info.Env().Undefined();
|
|
514
|
-
}
|
|
515
|
-
|
|
516
|
-
ss << str;
|
|
517
|
-
|
|
518
|
-
return Napi::String::New(info.Env(), ss.str());
|
|
519
|
-
}
|
|
520
|
-
|
|
521
|
-
Napi::Value GetTokenAttributes(const Napi::CallbackInfo& info) {
|
|
522
|
-
if (disposed) {
|
|
523
|
-
Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
|
|
524
|
-
return info.Env().Undefined();
|
|
525
|
-
}
|
|
526
|
-
|
|
527
|
-
if (info[0].IsNumber() == false) {
|
|
528
|
-
return Napi::Number::From(info.Env(), int32_t(LLAMA_TOKEN_ATTR_UNDEFINED));
|
|
529
|
-
}
|
|
530
|
-
|
|
531
|
-
int token = info[0].As<Napi::Number>().Int32Value();
|
|
532
|
-
auto tokenAttributes = llama_token_get_attr(model, token);
|
|
533
|
-
|
|
534
|
-
return Napi::Number::From(info.Env(), int32_t(tokenAttributes));
|
|
535
|
-
}
|
|
536
|
-
Napi::Value IsEogToken(const Napi::CallbackInfo& info) {
|
|
537
|
-
if (disposed) {
|
|
538
|
-
Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
|
|
539
|
-
return info.Env().Undefined();
|
|
540
|
-
}
|
|
541
|
-
|
|
542
|
-
if (info[0].IsNumber() == false) {
|
|
543
|
-
return Napi::Boolean::New(info.Env(), false);
|
|
544
|
-
}
|
|
545
|
-
|
|
546
|
-
int token = info[0].As<Napi::Number>().Int32Value();
|
|
547
|
-
|
|
548
|
-
return Napi::Boolean::New(info.Env(), llama_token_is_eog(model, token));
|
|
549
|
-
}
|
|
550
|
-
Napi::Value GetVocabularyType(const Napi::CallbackInfo& info) {
|
|
551
|
-
if (disposed) {
|
|
552
|
-
Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
|
|
553
|
-
return info.Env().Undefined();
|
|
554
|
-
}
|
|
555
|
-
|
|
556
|
-
auto vocabularyType = llama_vocab_type(model);
|
|
557
|
-
|
|
558
|
-
return Napi::Number::From(info.Env(), int32_t(vocabularyType));
|
|
559
|
-
}
|
|
560
|
-
Napi::Value ShouldPrependBosToken(const Napi::CallbackInfo& info) {
|
|
561
|
-
const int addBos = llama_add_bos_token(model);
|
|
562
|
-
|
|
563
|
-
bool shouldPrependBos = addBos != -1 ? bool(addBos) : (llama_vocab_type(model) == LLAMA_VOCAB_TYPE_SPM);
|
|
564
|
-
|
|
565
|
-
return Napi::Boolean::New(info.Env(), shouldPrependBos);
|
|
566
|
-
}
|
|
567
|
-
|
|
568
|
-
Napi::Value GetModelSize(const Napi::CallbackInfo& info) {
|
|
569
|
-
return Napi::Number::From(info.Env(), llama_model_size(model));
|
|
570
|
-
}
|
|
571
|
-
|
|
572
|
-
// Registers the `AddonModel` JS class on the addon's exports object,
// mapping each JS-facing (camelCase) method name to its native implementation.
static void init(Napi::Object exports) {
    exports.Set(
        "AddonModel",
        DefineClass(
            exports.Env(),
            "AddonModel",
            {
                InstanceMethod("init", &AddonModel::Init),
                InstanceMethod("loadLora", &AddonModel::LoadLora),
                InstanceMethod("abortActiveModelLoad", &AddonModel::AbortActiveModelLoad),
                InstanceMethod("tokenize", &AddonModel::Tokenize),
                InstanceMethod("detokenize", &AddonModel::Detokenize),
                InstanceMethod("getTrainContextSize", &AddonModel::GetTrainContextSize),
                InstanceMethod("getEmbeddingVectorSize", &AddonModel::GetEmbeddingVectorSize),
                InstanceMethod("getTotalSize", &AddonModel::GetTotalSize),
                InstanceMethod("getTotalParameters", &AddonModel::GetTotalParameters),
                InstanceMethod("getModelDescription", &AddonModel::GetModelDescription),
                InstanceMethod("tokenBos", &AddonModel::TokenBos),
                InstanceMethod("tokenEos", &AddonModel::TokenEos),
                InstanceMethod("tokenNl", &AddonModel::TokenNl),
                InstanceMethod("prefixToken", &AddonModel::PrefixToken),
                InstanceMethod("middleToken", &AddonModel::MiddleToken),
                InstanceMethod("suffixToken", &AddonModel::SuffixToken),
                InstanceMethod("eotToken", &AddonModel::EotToken),
                InstanceMethod("getTokenString", &AddonModel::GetTokenString),
                InstanceMethod("getTokenAttributes", &AddonModel::GetTokenAttributes),
                InstanceMethod("isEogToken", &AddonModel::IsEogToken),
                InstanceMethod("getVocabularyType", &AddonModel::GetVocabularyType),
                InstanceMethod("shouldPrependBosToken", &AddonModel::ShouldPrependBosToken),
                InstanceMethod("getModelSize", &AddonModel::GetModelSize),
                InstanceMethod("dispose", &AddonModel::Dispose),
            }
        )
    );
}
|
|
607
|
-
};
|
|
608
|
-
|
|
609
|
-
// Progress callback handed to llama.cpp during model loading.
// `user_data` is the owning AddonModel. Returning `false` aborts the load.
static bool llamaModelParamsProgressCallback(float progress, void * user_data) {
    AddonModel* addonModel = (AddonModel *) user_data;
    unsigned percentage = (unsigned) (100 * progress);

    // Only log when the integer percentage advances, to avoid flooding the log.
    if (percentage > addonModel->modelLoadPercentage) {
        addonModel->modelLoadPercentage = percentage;

        // original llama.cpp logs
        addonLlamaCppLogCallback(GGML_LOG_LEVEL_INFO, ".", nullptr);
        if (percentage >= 100) {
            addonLlamaCppLogCallback(GGML_LOG_LEVEL_INFO, "\n", nullptr);
        }
    }

    // Forward finer-grained (float) progress to the JS progress event, if registered.
    if (progress > addonModel->rawModelLoadPercentage) {
        addonModel->rawModelLoadPercentage = progress;

        if (addonModel->onLoadProgressEventCallbackSet) {
            // Ownership of `data` transfers to the thread-safe callback on success.
            addon_progress_event* data = new addon_progress_event {
                progress
            };

            auto status = addonModel->addonThreadSafeOnLoadProgressEventCallback.NonBlockingCall(data);

            // The non-blocking call may fail (e.g. queue full / env torn down);
            // in that case the event was not enqueued, so free it here.
            if (status != napi_ok) {
                delete data;
            }
        }
    }

    // Keep loading unless the JS side requested an abort.
    return !(addonModel->abortModelLoad);
}
|
|
641
|
-
|
|
642
|
-
// Async worker that loads the model file off the JS thread.
// Resolves the returned promise with a boolean: whether loading succeeded.
class AddonModelLoadModelWorker : public Napi::AsyncWorker {
    public:
        AddonModel* model;

        AddonModelLoadModelWorker(const Napi::Env& env, AddonModel* model)
            : Napi::AsyncWorker(env, "AddonModelLoadModelWorker"),
              model(model),
              deferred(Napi::Promise::Deferred::New(env)) {
            model->Ref();  // keep the wrapping JS object alive while the worker runs
        }
        ~AddonModelLoadModelWorker() {
            model->Unref();
        }

        Napi::Promise GetPromise() {
            return deferred.Promise();
        }

    protected:
        Napi::Promise::Deferred deferred;

        // Runs on a worker thread — must not touch the JS environment.
        void Execute() {
            try {
                model->model = llama_load_model_from_file(model->modelPath.c_str(), model->model_params);

                model->modelLoaded = model->model != nullptr && model->model != NULL;
            } catch (const std::exception& e) {
                SetError(e.what());
            } catch(...) {
                SetError("Unknown error when calling \"llama_load_model_from_file\"");
            }
        }
        void OnOK() {
            if (model->modelLoaded) {
                uint64_t modelSize = llama_model_size(model->model);
                // Report the native allocation to the GC so it can account for it.
                adjustNapiExternalMemoryAdd(Env(), modelSize);
                model->loadedModelSize = modelSize;
            }

            deferred.Resolve(Napi::Boolean::New(Env(), model->modelLoaded));
            // Loading is done — release the thread-safe progress callback, if one was set.
            if (model->onLoadProgressEventCallbackSet) {
                model->addonThreadSafeOnLoadProgressEventCallback.Release();
            }
        }
        void OnError(const Napi::Error& err) {
            deferred.Reject(err.Value());
        }
};
|
|
690
|
-
// Async worker that frees the native model off the JS thread and then
// disposes the AddonModel wrapper. Resolves with `undefined`.
class AddonModelUnloadModelWorker : public Napi::AsyncWorker {
    public:
        AddonModel* model;

        AddonModelUnloadModelWorker(const Napi::Env& env, AddonModel* model)
            : Napi::AsyncWorker(env, "AddonModelUnloadModelWorker"),
              model(model),
              deferred(Napi::Promise::Deferred::New(env)) {
            model->Ref();  // keep the wrapping JS object alive while the worker runs
        }
        ~AddonModelUnloadModelWorker() {
            model->Unref();
        }

        Napi::Promise GetPromise() {
            return deferred.Promise();
        }

    protected:
        Napi::Promise::Deferred deferred;

        // Runs on a worker thread — must not touch the JS environment.
        void Execute() {
            try {
                llama_free_model(model->model);
                model->modelLoaded = false;

                model->dispose();
            } catch (const std::exception& e) {
                SetError(e.what());
            } catch(...) {
                SetError("Unknown error when calling \"llama_free_model\"");
            }
        }
        void OnOK() {
            // The native model memory is gone — stop accounting for it in the GC.
            adjustNapiExternalMemorySubtract(Env(), model->loadedModelSize);
            model->loadedModelSize = 0;

            deferred.Resolve(Env().Undefined());
        }
        void OnError(const Napi::Error& err) {
            deferred.Reject(err.Value());
        }
};
|
|
733
|
-
// Async worker that applies a LoRA adapter file to a loaded model off the
// JS thread, optionally relative to a base model. Resolves with `undefined`
// on success, rejects with a descriptive error otherwise.
class AddonModelLoadLoraWorker : public Napi::AsyncWorker {
    public:
        AddonModel* model;
        std::string loraFilePath;
        float loraScale;
        int32_t loraThreads;
        std::string baseModelPath;  // empty string means "no base model"

        AddonModelLoadLoraWorker(
            const Napi::Env& env,
            AddonModel* model,
            std::string loraFilePath,
            float loraScale,
            int32_t loraThreads,
            std::string baseModelPath
        )
            : Napi::AsyncWorker(env, "AddonModelLoadLoraWorker"),
              model(model),
              loraFilePath(loraFilePath),
              loraScale(loraScale),
              loraThreads(loraThreads),
              baseModelPath(baseModelPath),
              deferred(Napi::Promise::Deferred::New(env)) {
            model->Ref();  // keep the wrapping JS object alive while the worker runs
        }
        ~AddonModelLoadLoraWorker() {
            model->Unref();
        }

        Napi::Promise GetPromise() {
            return deferred.Promise();
        }

    protected:
        Napi::Promise::Deferred deferred;

        // Runs on a worker thread — must not touch the JS environment.
        void Execute() {
            try {
                const auto res = llama_model_apply_lora_from_file(
                    model->model,
                    loraFilePath.c_str(),
                    loraScale,
                    baseModelPath.empty() ? NULL : baseModelPath.c_str(),
                    loraThreads
                );

                // Non-zero return indicates failure to apply the adapter.
                if (res != 0) {
                    SetError(
                        std::string(
                            std::string("Failed to apply LoRA \"") + loraFilePath + std::string("\"") + (
                                baseModelPath.empty()
                                    ? std::string("")
                                    : (std::string(" with base model \"") + baseModelPath + std::string("\""))
                            )
                        )
                    );
                }
            } catch (const std::exception& e) {
                SetError(e.what());
            } catch(...) {
                SetError("Unknown error when calling \"llama_model_apply_lora_from_file\"");
            }
        }
        void OnOK() {
            deferred.Resolve(Env().Undefined());
        }
        void OnError(const Napi::Error& err) {
            deferred.Reject(err.Value());
        }
};
|
|
803
|
-
|
|
804
|
-
// Begins asynchronously loading the model from `modelPath`.
// Resolves to a boolean indicating whether loading succeeded.
Napi::Value AddonModel::Init(const Napi::CallbackInfo& info) {
    if (disposed) {
        Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
        return info.Env().Undefined();
    }

    auto* loadWorker = new AddonModelLoadModelWorker(this->Env(), this);
    loadWorker->Queue();

    return loadWorker->GetPromise();
}
|
|
814
|
-
// Asynchronously applies a LoRA adapter to the loaded model.
// Arguments: [0] adapter file path, [1] scale, [2] thread count (0 = auto),
// [3] optional base-model path. Resolves to `undefined` on success.
Napi::Value AddonModel::LoadLora(const Napi::CallbackInfo& info) {
    std::string loraFilePath = info[0].As<Napi::String>().Utf8Value();
    float scale = info[1].As<Napi::Number>().FloatValue();
    int32_t threads = info[2].As<Napi::Number>().Int32Value();
    std::string baseModelPath = (info.Length() > 3 && info[3].IsString()) ? info[3].As<Napi::String>().Utf8Value() : std::string("");

    // A thread count of 0 means "use all hardware threads".
    int32_t resolvedThreads = threads == 0 ? std::thread::hardware_concurrency() : threads;

    // Bug fix: `resolvedThreads` was previously computed but the raw `threads`
    // value was passed to the worker, so a 0 thread count reached
    // llama_model_apply_lora_from_file unresolved.
    AddonModelLoadLoraWorker* worker = new AddonModelLoadLoraWorker(this->Env(), this, loraFilePath, scale, resolvedThreads, baseModelPath);
    worker->Queue();
    return worker->GetPromise();
}
|
|
826
|
-
// Releases the model's native resources.
// Resolves once unloading is complete: asynchronously when a model is
// currently loaded, immediately (already-resolved promise) otherwise.
Napi::Value AddonModel::Dispose(const Napi::CallbackInfo& info) {
    if (disposed) {
        return info.Env().Undefined();
    }

    if (!modelLoaded) {
        // Nothing to unload; dispose synchronously and resolve right away.
        dispose();

        auto deferred = Napi::Promise::Deferred::New(info.Env());
        deferred.Resolve(info.Env().Undefined());
        return deferred.Promise();
    }

    modelLoaded = false;

    auto* unloadWorker = new AddonModelUnloadModelWorker(this->Env(), this);
    unloadWorker->Queue();
    return unloadWorker->GetPromise();
}
|
|
845
|
-
|
|
846
|
-
// Wraps a parsed GBNF grammar so it can be created from JS and shared
// between grammar evaluation states.
class AddonGrammar : public Napi::ObjectWrap<AddonGrammar> {
    public:
        grammar_parser::parse_state parsed_grammar;
        // Keeps the addon exports object alive for as long as this grammar exists.
        Napi::Reference<Napi::Object> addonExportsRef;
        bool hasAddonExportsRef = false;

        AddonGrammar(const Napi::CallbackInfo& info) : Napi::ObjectWrap<AddonGrammar>(info) {
            // The GBNF grammar text to parse
            std::string grammarCode = info[0].As<Napi::String>().Utf8Value();
            bool should_print_grammar = false;

            if (info.Length() > 1 && info[1].IsObject()) {
                Napi::Object options = info[1].As<Napi::Object>();

                if (options.Has("addonExports")) {
                    addonExportsRef = Napi::Persistent(options.Get("addonExports").As<Napi::Object>());
                    hasAddonExportsRef = true;
                }

                if (options.Has("printGrammar")) {
                    should_print_grammar = options.Get("printGrammar").As<Napi::Boolean>().Value();
                }
            }

            parsed_grammar = grammar_parser::parse(grammarCode.c_str());
            // will be empty (default) if there are parse errors
            if (parsed_grammar.rules.empty()) {
                Napi::Error::New(info.Env(), "Failed to parse grammar").ThrowAsJavaScriptException();
                return;
            }

            if (should_print_grammar) {
                grammar_parser::print_grammar(stderr, parsed_grammar);
            }
        }

        ~AddonGrammar() {
            if (hasAddonExportsRef) {
                addonExportsRef.Unref();
                hasAddonExportsRef = false;
            }
        }

        // Registers the `AddonGrammar` JS class (no instance methods) on exports.
        static void init(Napi::Object exports) {
            exports.Set("AddonGrammar", DefineClass(exports.Env(), "AddonGrammar", {}));
        }
};
|
|
893
|
-
|
|
894
|
-
// Per-generation evaluation state for an AddonGrammar: owns a native
// llama_grammar instance initialized from the grammar's parsed rules.
class AddonGrammarEvaluationState : public Napi::ObjectWrap<AddonGrammarEvaluationState> {
    public:
        AddonGrammar* grammarDef;
        llama_grammar* grammar = nullptr;

        AddonGrammarEvaluationState(const Napi::CallbackInfo& info) : Napi::ObjectWrap<AddonGrammarEvaluationState>(info) {
            grammarDef = Napi::ObjectWrap<AddonGrammar>::Unwrap(info[0].As<Napi::Object>());
            grammarDef->Ref();  // keep the grammar definition alive while this state exists

            // Build the native grammar from the parsed rules, starting at the "root" symbol.
            std::vector<const llama_grammar_element*> grammar_rules(grammarDef->parsed_grammar.c_rules());
            grammar = llama_grammar_init(grammar_rules.data(), grammar_rules.size(), grammarDef->parsed_grammar.symbol_ids.at("root"));
        }

        ~AddonGrammarEvaluationState() {
            grammarDef->Unref();

            if (grammar != nullptr) {
                llama_grammar_free(grammar);
                grammar = nullptr;
            }
        }

        // Registers the `AddonGrammarEvaluationState` JS class (no instance methods) on exports.
        static void init(Napi::Object exports) {
            exports.Set("AddonGrammarEvaluationState", DefineClass(exports.Env(), "AddonGrammarEvaluationState", {}));
        }
};
|
|
920
|
-
|
|
921
|
-
// JS-facing wrapper around a llama_context: owns the context, its batch,
// and the external-memory bookkeeping reported to the Node.js GC.
class AddonContext : public Napi::ObjectWrap<AddonContext> {
    public:
        AddonModel* model;
        llama_context_params context_params;
        llama_context* ctx;
        llama_batch batch;
        uint64_t batchMemorySize = 0;     // bytes currently reported to the GC for `batch`
        bool has_batch = false;
        int32_t batch_n_tokens = 0;       // capacity of the current batch
        int n_cur = 0;

        uint64_t loadedContextMemorySize = 0;  // bytes reported to the GC for `ctx`
        bool contextLoaded = false;

        bool disposed = false;

        // Argument [0] is the AddonModel; optional [1] is an options object
        // (seed/noSeed, contextSize, batchSize, sequences, embeddings,
        // flashAttention, threads).
        AddonContext(const Napi::CallbackInfo& info) : Napi::ObjectWrap<AddonContext>(info) {
            model = Napi::ObjectWrap<AddonModel>::Unwrap(info[0].As<Napi::Object>());
            model->Ref();  // keep the model alive while this context exists

            context_params = llama_context_default_params();
            context_params.seed = -1;
            context_params.n_ctx = 4096;
            context_params.n_threads = 6;
            context_params.n_threads_batch = context_params.n_threads;

            if (info.Length() > 1 && info[1].IsObject()) {
                Napi::Object options = info[1].As<Napi::Object>();

                if (options.Has("noSeed")) {
                    // "noSeed" means: use a time-based seed instead of an explicit one.
                    context_params.seed = time(NULL);
                } else if (options.Has("seed")) {
                    context_params.seed = options.Get("seed").As<Napi::Number>().Uint32Value();
                }

                if (options.Has("contextSize")) {
                    context_params.n_ctx = options.Get("contextSize").As<Napi::Number>().Uint32Value();
                }

                if (options.Has("batchSize")) {
                    context_params.n_batch = options.Get("batchSize").As<Napi::Number>().Uint32Value();
                    context_params.n_ubatch = context_params.n_batch; // the batch queue is managed in the JS side, so there's no need for managing it on the C++ side
                }

                if (options.Has("sequences")) {
                    context_params.n_seq_max = options.Get("sequences").As<Napi::Number>().Uint32Value();
                }

                if (options.Has("embeddings")) {
                    context_params.embeddings = options.Get("embeddings").As<Napi::Boolean>().Value();
                }

                if (options.Has("flashAttention")) {
                    context_params.flash_attn = options.Get("flashAttention").As<Napi::Boolean>().Value();
                }

                if (options.Has("threads")) {
                    const auto n_threads = options.Get("threads").As<Napi::Number>().Uint32Value();
                    // 0 means "use all hardware threads".
                    const auto resolved_n_threads = n_threads == 0 ? std::thread::hardware_concurrency() : n_threads;

                    context_params.n_threads = resolved_n_threads;
                    context_params.n_threads_batch = resolved_n_threads;
                }
            }
        }
        ~AddonContext() {
            dispose();
        }

        // Idempotently frees the context, updates GC accounting, releases the
        // model reference, and frees the batch.
        void dispose() {
            if (disposed) {
                return;
            }

            disposed = true;
            if (contextLoaded) {
                contextLoaded = false;
                llama_free(ctx);

                adjustNapiExternalMemorySubtract(Env(), loadedContextMemorySize);
                loadedContextMemorySize = 0;
            }

            model->Unref();

            disposeBatch();
        }
        // Idempotently frees the current batch and its GC accounting.
        void disposeBatch() {
            if (!has_batch) {
                return;
            }

            llama_batch_free(batch);
            has_batch = false;
            batch_n_tokens = 0;

            adjustNapiExternalMemorySubtract(Env(), batchMemorySize);
            batchMemorySize = 0;
        }

        Napi::Value Init(const Napi::CallbackInfo& info);
        Napi::Value Dispose(const Napi::CallbackInfo& info);

        // Returns the context window size (n_ctx) of the loaded context.
        Napi::Value GetContextSize(const Napi::CallbackInfo& info) {
            if (disposed) {
                Napi::Error::New(info.Env(), "Context is disposed").ThrowAsJavaScriptException();
                return info.Env().Undefined();
            }

            return Napi::Number::From(info.Env(), llama_n_ctx(ctx));
        }
        // (Re)creates the batch with capacity for [0] tokens and updates
        // the external-memory accounting to the new batch size.
        Napi::Value InitBatch(const Napi::CallbackInfo& info) {
            if (disposed) {
                Napi::Error::New(info.Env(), "Context is disposed").ThrowAsJavaScriptException();
                return info.Env().Undefined();
            }

            if (has_batch) {
                llama_batch_free(batch);
            }

            int32_t n_tokens = info[0].As<Napi::Number>().Int32Value();

            batch = llama_batch_init(n_tokens, 0, 1);
            has_batch = true;
            batch_n_tokens = n_tokens;

            // Report only the delta between the old and new batch size to the GC.
            uint64_t newBatchMemorySize = calculateBatchMemorySize(n_tokens, llama_n_embd(model->model), context_params.n_batch);
            if (newBatchMemorySize > batchMemorySize) {
                adjustNapiExternalMemoryAdd(Env(), newBatchMemorySize - batchMemorySize);
                batchMemorySize = newBatchMemorySize;
            } else if (newBatchMemorySize < batchMemorySize) {
                adjustNapiExternalMemorySubtract(Env(), batchMemorySize - newBatchMemorySize);
                batchMemorySize = newBatchMemorySize;
            }

            return info.Env().Undefined();
        }
        Napi::Value DisposeBatch(const Napi::CallbackInfo& info) {
            if (disposed) {
                Napi::Error::New(info.Env(), "Context is disposed").ThrowAsJavaScriptException();
                return info.Env().Undefined();
            }

            disposeBatch();

            return info.Env().Undefined();
        }
        // Appends tokens ([2], Uint32Array) to the batch for sequence [0]
        // starting at context position [1]. When [3] is true, requests logits
        // for the last appended token and returns its logit index.
        Napi::Value AddToBatch(const Napi::CallbackInfo& info) {
            if (!has_batch) {
                Napi::Error::New(info.Env(), "No batch is initialized").ThrowAsJavaScriptException();
                return info.Env().Undefined();
            }

            int32_t sequenceId = info[0].As<Napi::Number>().Int32Value();
            int32_t firstTokenContextIndex = info[1].As<Napi::Number>().Int32Value();
            Napi::Uint32Array tokens = info[2].As<Napi::Uint32Array>();
            bool generateLogitAtTheEnd = info[3].As<Napi::Boolean>().Value();

            auto tokensLength = tokens.ElementLength();
            GGML_ASSERT(batch.n_tokens + tokensLength <= batch_n_tokens);

            for (size_t i = 0; i < tokensLength; i++) {
                llama_batch_add(batch, static_cast<llama_token>(tokens[i]), firstTokenContextIndex + i, { sequenceId }, false);
            }

            if (generateLogitAtTheEnd) {
                batch.logits[batch.n_tokens - 1] = true;

                auto logit_index = batch.n_tokens - 1;

                return Napi::Number::From(info.Env(), logit_index);
            }

            return info.Env().Undefined();
        }
        // Removes all KV-cache cells of sequence [0].
        Napi::Value DisposeSequence(const Napi::CallbackInfo& info) {
            if (disposed) {
                Napi::Error::New(info.Env(), "Context is disposed").ThrowAsJavaScriptException();
                return info.Env().Undefined();
            }

            int32_t sequenceId = info[0].As<Napi::Number>().Int32Value();

            // -1, -1 means the whole position range of the sequence.
            bool result = llama_kv_cache_seq_rm(ctx, sequenceId, -1, -1);

            if (!result) {
                Napi::Error::New(info.Env(), "Failed to dispose sequence").ThrowAsJavaScriptException();
                return info.Env().Undefined();
            }

            return info.Env().Undefined();
        }
        // Removes KV-cache cells of sequence [0] in position range [1], [2]);
        // returns whether the removal succeeded.
        Napi::Value RemoveTokenCellsFromSequence(const Napi::CallbackInfo& info) {
            if (disposed) {
                Napi::Error::New(info.Env(), "Context is disposed").ThrowAsJavaScriptException();
                return info.Env().Undefined();
            }

            int32_t sequenceId = info[0].As<Napi::Number>().Int32Value();
            int32_t startPos = info[1].As<Napi::Number>().Int32Value();
            int32_t endPos = info[2].As<Napi::Number>().Int32Value();

            bool result = llama_kv_cache_seq_rm(ctx, sequenceId, startPos, endPos);

            return Napi::Boolean::New(info.Env(), result);
        }
        // Shifts positions of sequence [0]'s KV cells in range [1], [2]) by delta [3].
        Napi::Value ShiftSequenceTokenCells(const Napi::CallbackInfo& info) {
            if (disposed) {
                Napi::Error::New(info.Env(), "Context is disposed").ThrowAsJavaScriptException();
                return info.Env().Undefined();
            }

            int32_t sequenceId = info[0].As<Napi::Number>().Int32Value();
            int32_t startPos = info[1].As<Napi::Number>().Int32Value();
            int32_t endPos = info[2].As<Napi::Number>().Int32Value();
            int32_t shiftDelta = info[3].As<Napi::Number>().Int32Value();

            llama_kv_cache_seq_add(ctx, sequenceId, startPos, endPos, shiftDelta);

            return info.Env().Undefined();
        }
        Napi::Value DecodeBatch(const Napi::CallbackInfo& info);
        Napi::Value SampleToken(const Napi::CallbackInfo& info);

        // Advances grammar evaluation state [0] by accepting token [1].
        Napi::Value AcceptGrammarEvaluationStateToken(const Napi::CallbackInfo& info) {
            AddonGrammarEvaluationState* grammar_evaluation_state =
                Napi::ObjectWrap<AddonGrammarEvaluationState>::Unwrap(info[0].As<Napi::Object>());
            llama_token tokenId = info[1].As<Napi::Number>().Int32Value();

            if ((grammar_evaluation_state)->grammar != nullptr) {
                llama_grammar_accept_token(ctx, (grammar_evaluation_state)->grammar, tokenId);
            }

            return info.Env().Undefined();
        }

        // Returns whether token [1] is permitted next by grammar evaluation
        // state [0], by sampling a single-candidate array through the grammar.
        Napi::Value CanBeNextTokenForGrammarEvaluationState(const Napi::CallbackInfo& info) {
            AddonGrammarEvaluationState* grammar_evaluation_state =
                Napi::ObjectWrap<AddonGrammarEvaluationState>::Unwrap(info[0].As<Napi::Object>());
            llama_token tokenId = info[1].As<Napi::Number>().Int32Value();

            if ((grammar_evaluation_state)->grammar != nullptr) {
                std::vector<llama_token_data> candidates;
                candidates.reserve(1);
                candidates.emplace_back(llama_token_data { tokenId, 1, 0.0f });

                llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };

                llama_sample_grammar(ctx, &candidates_p, (grammar_evaluation_state)->grammar);

                // The grammar marks forbidden tokens with -inf logit (or removes them).
                if (candidates_p.size == 0 || candidates_p.data[0].logit == -INFINITY) {
                    return Napi::Boolean::New(info.Env(), false);
                }

                return Napi::Boolean::New(info.Env(), true);
            }

            return Napi::Boolean::New(info.Env(), false);
        }

        // Returns the embedding vector after decoding [0] input tokens:
        // prefers the sequence embedding, falling back to the last token's embedding.
        Napi::Value GetEmbedding(const Napi::CallbackInfo& info) {
            if (disposed) {
                Napi::Error::New(info.Env(), "Context is disposed").ThrowAsJavaScriptException();
                return info.Env().Undefined();
            }

            int32_t inputTokensLength = info[0].As<Napi::Number>().Int32Value();

            if (inputTokensLength <= 0) {
                Napi::Error::New(info.Env(), "Invalid input tokens length").ThrowAsJavaScriptException();
                return info.Env().Undefined();
            }

            const int n_embd = llama_n_embd(model->model);
            const auto* embeddings = llama_get_embeddings_seq(ctx, 0);
            if (embeddings == NULL) {
                embeddings = llama_get_embeddings_ith(ctx, inputTokensLength - 1);

                if (embeddings == NULL) {
                    Napi::Error::New(info.Env(), std::string("Failed to get embeddings for token ") + std::to_string(inputTokensLength - 1)).ThrowAsJavaScriptException();
                    return info.Env().Undefined();
                }
            }

            Napi::Float64Array result = Napi::Float64Array::New(info.Env(), n_embd);
            for (size_t i = 0; i < n_embd; ++i) {
                result[i] = embeddings[i];
            }

            return result;
        }

        // Returns the byte size of the context's serializable state.
        Napi::Value GetStateSize(const Napi::CallbackInfo& info) {
            if (disposed) {
                Napi::Error::New(info.Env(), "Context is disposed").ThrowAsJavaScriptException();
                return info.Env().Undefined();
            }

            return Napi::Number::From(info.Env(), llama_state_get_size(ctx));
        }

        // Prints llama.cpp timing stats to the log and resets the counters.
        Napi::Value PrintTimings(const Napi::CallbackInfo& info) {
            llama_print_timings(ctx);
            llama_reset_timings(ctx);
            return info.Env().Undefined();
        }

        // Registers the `AddonContext` JS class on the addon's exports object.
        static void init(Napi::Object exports) {
            exports.Set(
                "AddonContext",
                DefineClass(
                    exports.Env(),
                    "AddonContext",
                    {
                        InstanceMethod("init", &AddonContext::Init),
                        InstanceMethod("getContextSize", &AddonContext::GetContextSize),
                        InstanceMethod("initBatch", &AddonContext::InitBatch),
                        InstanceMethod("addToBatch", &AddonContext::AddToBatch),
                        InstanceMethod("disposeSequence", &AddonContext::DisposeSequence),
                        InstanceMethod("removeTokenCellsFromSequence", &AddonContext::RemoveTokenCellsFromSequence),
                        InstanceMethod("shiftSequenceTokenCells", &AddonContext::ShiftSequenceTokenCells),
                        InstanceMethod("decodeBatch", &AddonContext::DecodeBatch),
                        InstanceMethod("sampleToken", &AddonContext::SampleToken),
                        InstanceMethod("acceptGrammarEvaluationStateToken", &AddonContext::AcceptGrammarEvaluationStateToken),
                        InstanceMethod("canBeNextTokenForGrammarEvaluationState", &AddonContext::CanBeNextTokenForGrammarEvaluationState),
                        InstanceMethod("getEmbedding", &AddonContext::GetEmbedding),
                        InstanceMethod("getStateSize", &AddonContext::GetStateSize),
                        InstanceMethod("printTimings", &AddonContext::PrintTimings),
                        InstanceMethod("dispose", &AddonContext::Dispose),
                    }
                )
            );
        }
};
|
|
1256
|
-
|
|
1257
|
-
|
|
1258
|
-
// Async worker that runs llama_decode on the context's current batch off
// the JS thread. Resolves with `undefined`, rejects with a decode error.
class AddonContextDecodeBatchWorker : public Napi::AsyncWorker {
    public:
        AddonContext* ctx;

        AddonContextDecodeBatchWorker(const Napi::Env& env, AddonContext* ctx)
            : Napi::AsyncWorker(env, "AddonContextDecodeBatchWorker"),
              ctx(ctx),
              deferred(Napi::Promise::Deferred::New(env)) {
            ctx->Ref();  // keep the context wrapper alive while the worker runs
        }
        ~AddonContextDecodeBatchWorker() {
            ctx->Unref();
        }

        Napi::Promise GetPromise() {
            return deferred.Promise();
        }

    protected:
        Napi::Promise::Deferred deferred;

        // Runs on a worker thread — must not touch the JS environment.
        void Execute() {
            try {
                // Perform the evaluation using llama_decode.
                int r = llama_decode(ctx->ctx, ctx->batch);

                if (r != 0) {
                    // llama_decode returns 1 specifically when no KV slot was found.
                    if (r == 1) {
                        SetError("could not find a KV slot for the batch (try reducing the size of the batch or increase the context)");
                    } else {
                        SetError("Eval has failed");
                    }

                    return;
                }

                // Wait for the computation to finish before resolving.
                llama_synchronize(ctx->ctx);
            } catch (const std::exception& e) {
                SetError(e.what());
            } catch(...) {
                SetError("Unknown error when calling \"llama_decode\"");
            }
        }
        void OnOK() {
            deferred.Resolve(Env().Undefined());
        }
        void OnError(const Napi::Error& err) {
            deferred.Reject(err.Value());
        }
};
|
|
1308
|
-
|
|
1309
|
-
// Queues an asynchronous decode of the current batch.
// Resolves once llama_decode (and synchronization) completes.
Napi::Value AddonContext::DecodeBatch(const Napi::CallbackInfo& info) {
    auto* decodeWorker = new AddonContextDecodeBatchWorker(info.Env(), this);
    decodeWorker->Queue();

    return decodeWorker->GetPromise();
}
|
|
1314
|
-
|
|
1315
|
-
// Async worker that creates the native llama_context off the JS thread.
// Resolves the returned promise with a boolean: whether creation succeeded.
class AddonContextLoadContextWorker : public Napi::AsyncWorker {
    public:
        AddonContext* context;

        AddonContextLoadContextWorker(const Napi::Env& env, AddonContext* context)
            : Napi::AsyncWorker(env, "AddonContextLoadContextWorker"),
              context(context),
              deferred(Napi::Promise::Deferred::New(env)) {
            context->Ref();  // keep the context wrapper alive while the worker runs
        }
        ~AddonContextLoadContextWorker() {
            context->Unref();
        }

        Napi::Promise GetPromise() {
            return deferred.Promise();
        }

    protected:
        Napi::Promise::Deferred deferred;

        // Runs on a worker thread — must not touch the JS environment.
        void Execute() {
            try {
                context->ctx = llama_new_context_with_model(context->model->model, context->context_params);

                context->contextLoaded = context->ctx != nullptr && context->ctx != NULL;
            } catch (const std::exception& e) {
                SetError(e.what());
            } catch(...) {
                SetError("Unknown error when calling \"llama_new_context_with_model\"");
            }
        }
        void OnOK() {
            if (context->contextLoaded) {
                // Report the context's native memory to the GC so it can account for it.
                uint64_t contextMemorySize = llama_state_get_size(context->ctx);
                adjustNapiExternalMemoryAdd(Env(), contextMemorySize);
                context->loadedContextMemorySize = contextMemorySize;
            }

            deferred.Resolve(Napi::Boolean::New(Env(), context->contextLoaded));
        }
        void OnError(const Napi::Error& err) {
            deferred.Reject(err.Value());
        }
};
|
|
1360
|
-
// Frees the llama context (and its batch, if any) on a worker thread,
// then returns the previously-reported external memory to V8's accounting.
class AddonContextUnloadContextWorker : public Napi::AsyncWorker {
    public:
        AddonContext* context;

        AddonContextUnloadContextWorker(const Napi::Env& env, AddonContext* context)
            : Napi::AsyncWorker(env, "AddonContextUnloadContextWorker"),
              context(context),
              deferred(Napi::Promise::Deferred::New(env)) {
            // Keep the wrapped context alive for the duration of the async work.
            context->Ref();
        }
        ~AddonContextUnloadContextWorker() {
            context->Unref();
        }

        Napi::Promise GetPromise() {
            return deferred.Promise();
        }

    protected:
        Napi::Promise::Deferred deferred;

        // Runs on a worker thread — cleanup order matters: free the context first,
        // then the batch, then dispose the wrapper.
        void Execute() {
            try {
                llama_free(context->ctx);
                context->contextLoaded = false;

                // Nested try so a batch-free failure is reported distinctly from a context-free failure.
                try {
                    if (context->has_batch) {
                        llama_batch_free(context->batch);
                        context->has_batch = false;
                        context->batch_n_tokens = 0;
                    }

                    context->dispose();
                } catch (const std::exception& e) {
                    SetError(e.what());
                } catch(...) {
                    SetError("Unknown error when calling \"llama_batch_free\"");
                }
            } catch (const std::exception& e) {
                SetError(e.what());
            } catch(...) {
                SetError("Unknown error when calling \"llama_free\"");
            }
        }
        void OnOK() {
            // Undo the external-memory adjustments made when the context and batch were created.
            adjustNapiExternalMemorySubtract(Env(), context->loadedContextMemorySize);
            context->loadedContextMemorySize = 0;

            adjustNapiExternalMemorySubtract(Env(), context->batchMemorySize);
            context->batchMemorySize = 0;

            deferred.Resolve(Env().Undefined());
        }
        void OnError(const Napi::Error& err) {
            deferred.Reject(err.Value());
        }
};
|
|
1418
|
-
|
|
1419
|
-
// Loads the llama context asynchronously.
// Throws if this wrapper has already been disposed; otherwise returns a Promise
// that resolves with a boolean indicating whether the context was created.
Napi::Value AddonContext::Init(const Napi::CallbackInfo& info) {
    if (disposed) {
        Napi::Error::New(info.Env(), "Context is disposed").ThrowAsJavaScriptException();
        return info.Env().Undefined();
    }

    auto* loadWorker = new AddonContextLoadContextWorker(this->Env(), this);
    loadWorker->Queue();
    return loadWorker->GetPromise();
}
|
|
1429
|
-
// Disposes the context. Always returns a Promise:
// - already disposed: undefined (not a Promise, matching the original early-return)
// - context not loaded: disposes synchronously and resolves immediately
// - context loaded: unloads it on a worker thread
Napi::Value AddonContext::Dispose(const Napi::CallbackInfo& info) {
    if (disposed) {
        return info.Env().Undefined();
    }

    if (!contextLoaded) {
        // Nothing to unload asynchronously; clean up inline and resolve right away.
        dispose();

        Napi::Promise::Deferred deferred = Napi::Promise::Deferred::New(info.Env());
        deferred.Resolve(info.Env().Undefined());
        return deferred.Promise();
    }

    // Mark as unloaded up-front so concurrent calls don't queue a second unload.
    contextLoaded = false;

    auto* unloadWorker = new AddonContextUnloadContextWorker(this->Env(), this);
    unloadWorker->Queue();
    return unloadWorker->GetPromise();
}
|
|
1448
|
-
|
|
1449
|
-
class AddonContextSampleTokenWorker : public Napi::AsyncWorker {
|
|
1450
|
-
public:
|
|
1451
|
-
AddonContext* ctx;
|
|
1452
|
-
AddonGrammarEvaluationState* grammar_evaluation_state;
|
|
1453
|
-
int32_t batchLogitIndex;
|
|
1454
|
-
bool use_grammar = false;
|
|
1455
|
-
llama_token result;
|
|
1456
|
-
float temperature = 0.0f;
|
|
1457
|
-
float min_p = 0;
|
|
1458
|
-
int32_t top_k = 40;
|
|
1459
|
-
float top_p = 0.95f;
|
|
1460
|
-
float repeat_penalty = 1.10f; // 1.0 = disabled
|
|
1461
|
-
float repeat_penalty_presence_penalty = 0.00f; // 0.0 = disabled
|
|
1462
|
-
float repeat_penalty_frequency_penalty = 0.00f; // 0.0 = disabled
|
|
1463
|
-
std::vector<llama_token> repeat_penalty_tokens;
|
|
1464
|
-
std::unordered_map<llama_token, float> tokenBiases;
|
|
1465
|
-
bool useTokenBiases = false;
|
|
1466
|
-
bool use_repeat_penalty = false;
|
|
1467
|
-
|
|
1468
|
-
AddonContextSampleTokenWorker(const Napi::CallbackInfo& info, AddonContext* ctx)
|
|
1469
|
-
: Napi::AsyncWorker(info.Env(), "AddonContextSampleTokenWorker"),
|
|
1470
|
-
ctx(ctx),
|
|
1471
|
-
deferred(Napi::Promise::Deferred::New(info.Env())) {
|
|
1472
|
-
ctx->Ref();
|
|
1473
|
-
|
|
1474
|
-
batchLogitIndex = info[0].As<Napi::Number>().Int32Value();
|
|
1475
|
-
|
|
1476
|
-
if (info.Length() > 1 && info[1].IsObject()) {
|
|
1477
|
-
Napi::Object options = info[1].As<Napi::Object>();
|
|
1478
|
-
|
|
1479
|
-
if (options.Has("temperature")) {
|
|
1480
|
-
temperature = options.Get("temperature").As<Napi::Number>().FloatValue();
|
|
1481
|
-
}
|
|
1482
|
-
|
|
1483
|
-
if (options.Has("minP")) {
|
|
1484
|
-
min_p = options.Get("minP").As<Napi::Number>().FloatValue();
|
|
1485
|
-
}
|
|
1486
|
-
|
|
1487
|
-
if (options.Has("topK")) {
|
|
1488
|
-
top_k = options.Get("topK").As<Napi::Number>().Int32Value();
|
|
1489
|
-
}
|
|
1490
|
-
|
|
1491
|
-
if (options.Has("topP")) {
|
|
1492
|
-
top_p = options.Get("topP").As<Napi::Number>().FloatValue();
|
|
1493
|
-
}
|
|
1494
|
-
|
|
1495
|
-
if (options.Has("repeatPenalty")) {
|
|
1496
|
-
repeat_penalty = options.Get("repeatPenalty").As<Napi::Number>().FloatValue();
|
|
1497
|
-
}
|
|
1498
|
-
|
|
1499
|
-
if (options.Has("repeatPenaltyTokens")) {
|
|
1500
|
-
Napi::Uint32Array repeat_penalty_tokens_uint32_array = options.Get("repeatPenaltyTokens").As<Napi::Uint32Array>();
|
|
1501
|
-
|
|
1502
|
-
repeat_penalty_tokens.reserve(repeat_penalty_tokens_uint32_array.ElementLength());
|
|
1503
|
-
for (size_t i = 0; i < repeat_penalty_tokens_uint32_array.ElementLength(); i++) {
|
|
1504
|
-
repeat_penalty_tokens.push_back(static_cast<llama_token>(repeat_penalty_tokens_uint32_array[i]));
|
|
1505
|
-
}
|
|
1506
|
-
|
|
1507
|
-
use_repeat_penalty = true;
|
|
1508
|
-
}
|
|
1509
|
-
|
|
1510
|
-
if (options.Has("tokenBiasKeys") && options.Has("tokenBiasValues")) {
|
|
1511
|
-
Napi::Uint32Array tokenBiasKeys = options.Get("tokenBiasKeys").As<Napi::Uint32Array>();
|
|
1512
|
-
Napi::Float32Array tokenBiasValues = options.Get("tokenBiasValues").As<Napi::Float32Array>();
|
|
1513
|
-
|
|
1514
|
-
if (tokenBiasKeys.ElementLength() == tokenBiasValues.ElementLength()) {
|
|
1515
|
-
for (size_t i = 0; i < tokenBiasKeys.ElementLength(); i++) {
|
|
1516
|
-
tokenBiases[static_cast<llama_token>(tokenBiasKeys[i])] = tokenBiasValues[i];
|
|
1517
|
-
}
|
|
1518
|
-
|
|
1519
|
-
useTokenBiases = true;
|
|
1520
|
-
}
|
|
1521
|
-
}
|
|
1522
|
-
|
|
1523
|
-
if (options.Has("repeatPenaltyPresencePenalty")) {
|
|
1524
|
-
repeat_penalty_presence_penalty = options.Get("repeatPenaltyPresencePenalty").As<Napi::Number>().FloatValue();
|
|
1525
|
-
}
|
|
1526
|
-
|
|
1527
|
-
if (options.Has("repeatPenaltyFrequencyPenalty")) {
|
|
1528
|
-
repeat_penalty_frequency_penalty = options.Get("repeatPenaltyFrequencyPenalty").As<Napi::Number>().FloatValue();
|
|
1529
|
-
}
|
|
1530
|
-
|
|
1531
|
-
if (options.Has("grammarEvaluationState")) {
|
|
1532
|
-
grammar_evaluation_state =
|
|
1533
|
-
Napi::ObjectWrap<AddonGrammarEvaluationState>::Unwrap(options.Get("grammarEvaluationState").As<Napi::Object>());
|
|
1534
|
-
grammar_evaluation_state->Ref();
|
|
1535
|
-
use_grammar = true;
|
|
1536
|
-
}
|
|
1537
|
-
}
|
|
1538
|
-
}
|
|
1539
|
-
~AddonContextSampleTokenWorker() {
|
|
1540
|
-
ctx->Unref();
|
|
1541
|
-
|
|
1542
|
-
if (use_grammar) {
|
|
1543
|
-
grammar_evaluation_state->Unref();
|
|
1544
|
-
use_grammar = false;
|
|
1545
|
-
}
|
|
1546
|
-
}
|
|
1547
|
-
|
|
1548
|
-
Napi::Promise GetPromise() {
|
|
1549
|
-
return deferred.Promise();
|
|
1550
|
-
}
|
|
1551
|
-
|
|
1552
|
-
protected:
|
|
1553
|
-
Napi::Promise::Deferred deferred;
|
|
1554
|
-
|
|
1555
|
-
void Execute() {
|
|
1556
|
-
try {
|
|
1557
|
-
SampleToken();
|
|
1558
|
-
} catch (const std::exception& e) {
|
|
1559
|
-
SetError(e.what());
|
|
1560
|
-
} catch(...) {
|
|
1561
|
-
SetError("Unknown error when calling \"SampleToken\"");
|
|
1562
|
-
}
|
|
1563
|
-
}
|
|
1564
|
-
|
|
1565
|
-
void SampleToken() {
|
|
1566
|
-
llama_token new_token_id = 0;
|
|
1567
|
-
|
|
1568
|
-
// Select the best prediction.
|
|
1569
|
-
if (llama_get_logits(ctx->ctx) == nullptr) {
|
|
1570
|
-
SetError("This model does not support token generation");
|
|
1571
|
-
return;
|
|
1572
|
-
}
|
|
1573
|
-
|
|
1574
|
-
auto logits = llama_get_logits_ith(ctx->ctx, batchLogitIndex);
|
|
1575
|
-
auto n_vocab = llama_n_vocab(ctx->model->model);
|
|
1576
|
-
|
|
1577
|
-
std::vector<llama_token_data> candidates;
|
|
1578
|
-
candidates.reserve(n_vocab);
|
|
1579
|
-
|
|
1580
|
-
for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
|
|
1581
|
-
auto logit = logits[token_id];
|
|
1582
|
-
|
|
1583
|
-
if (useTokenBiases) {
|
|
1584
|
-
bool hasTokenBias = tokenBiases.find(token_id) != tokenBiases.end();
|
|
1585
|
-
if (hasTokenBias) {
|
|
1586
|
-
auto logitBias = tokenBiases.at(token_id);
|
|
1587
|
-
if (logitBias == -INFINITY || logitBias < -INFINITY) {
|
|
1588
|
-
if (!llama_token_is_eog(ctx->model->model, token_id)) {
|
|
1589
|
-
logit = -INFINITY;
|
|
1590
|
-
}
|
|
1591
|
-
} else {
|
|
1592
|
-
logit += logitBias;
|
|
1593
|
-
}
|
|
1594
|
-
}
|
|
1595
|
-
}
|
|
1596
|
-
|
|
1597
|
-
candidates.emplace_back(llama_token_data { token_id, logit, 0.0f });
|
|
1598
|
-
}
|
|
1599
|
-
|
|
1600
|
-
llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };
|
|
1601
|
-
|
|
1602
|
-
if (use_repeat_penalty && !repeat_penalty_tokens.empty()) {
|
|
1603
|
-
llama_sample_repetition_penalties(
|
|
1604
|
-
ctx->ctx,
|
|
1605
|
-
&candidates_p,
|
|
1606
|
-
repeat_penalty_tokens.data(),
|
|
1607
|
-
repeat_penalty_tokens.size(),
|
|
1608
|
-
repeat_penalty,
|
|
1609
|
-
repeat_penalty_frequency_penalty,
|
|
1610
|
-
repeat_penalty_presence_penalty
|
|
1611
|
-
);
|
|
1612
|
-
}
|
|
1613
|
-
|
|
1614
|
-
if (use_grammar && (grammar_evaluation_state)->grammar != nullptr) {
|
|
1615
|
-
llama_sample_grammar(ctx->ctx, &candidates_p, (grammar_evaluation_state)->grammar);
|
|
1616
|
-
|
|
1617
|
-
if ((candidates_p.size == 0 || candidates_p.data[0].logit == -INFINITY) && useTokenBiases) {
|
|
1618
|
-
// logit biases caused grammar sampling to fail, so sampling again without logit biases
|
|
1619
|
-
useTokenBiases = false;
|
|
1620
|
-
SampleToken();
|
|
1621
|
-
return;
|
|
1622
|
-
}
|
|
1623
|
-
}
|
|
1624
|
-
|
|
1625
|
-
if (temperature <= 0) {
|
|
1626
|
-
new_token_id = llama_sample_token_greedy(ctx->ctx, &candidates_p);
|
|
1627
|
-
} else {
|
|
1628
|
-
const int32_t resolved_top_k =
|
|
1629
|
-
top_k <= 0 ? llama_n_vocab(ctx->model->model) : std::min(top_k, llama_n_vocab(ctx->model->model));
|
|
1630
|
-
const int32_t n_probs = 0; // Number of probabilities to keep - 0 = disabled
|
|
1631
|
-
const float tfs_z = 1.00f; // Tail free sampling - 1.0 = disabled
|
|
1632
|
-
const float typical_p = 1.00f; // Typical probability - 1.0 = disabled
|
|
1633
|
-
const float resolved_top_p = top_p; // Top p sampling - 1.0 = disabled
|
|
1634
|
-
|
|
1635
|
-
// Temperature sampling
|
|
1636
|
-
size_t min_keep = std::max(1, n_probs);
|
|
1637
|
-
llama_sample_top_k(ctx->ctx, &candidates_p, resolved_top_k, min_keep);
|
|
1638
|
-
llama_sample_tail_free(ctx->ctx, &candidates_p, tfs_z, min_keep);
|
|
1639
|
-
llama_sample_typical(ctx->ctx, &candidates_p, typical_p, min_keep);
|
|
1640
|
-
llama_sample_top_p(ctx->ctx, &candidates_p, resolved_top_p, min_keep);
|
|
1641
|
-
llama_sample_min_p(ctx->ctx, &candidates_p, min_p, min_keep);
|
|
1642
|
-
llama_sample_temp(ctx->ctx, &candidates_p, temperature);
|
|
1643
|
-
new_token_id = llama_sample_token(ctx->ctx, &candidates_p);
|
|
1644
|
-
}
|
|
1645
|
-
|
|
1646
|
-
if (!llama_token_is_eog(ctx->model->model, new_token_id) && use_grammar && (grammar_evaluation_state)->grammar != nullptr) {
|
|
1647
|
-
llama_grammar_accept_token(ctx->ctx, (grammar_evaluation_state)->grammar, new_token_id);
|
|
1648
|
-
}
|
|
1649
|
-
|
|
1650
|
-
result = new_token_id;
|
|
1651
|
-
}
|
|
1652
|
-
void OnOK() {
|
|
1653
|
-
Napi::Number resultValue = Napi::Number::New(Env(), static_cast<uint32_t>(result));
|
|
1654
|
-
deferred.Resolve(resultValue);
|
|
1655
|
-
}
|
|
1656
|
-
void OnError(const Napi::Error& err) {
|
|
1657
|
-
deferred.Reject(err.Value());
|
|
1658
|
-
}
|
|
1659
|
-
};
|
|
1660
|
-
|
|
1661
|
-
// Samples the next token asynchronously; forwards `info` to the worker,
// which parses the batch logit index and sampling options from it.
Napi::Value AddonContext::SampleToken(const Napi::CallbackInfo& info) {
    AddonContextSampleTokenWorker* worker = new AddonContextSampleTokenWorker(info, this);
    worker->Queue();
    return worker->GetPromise();
}
|
|
1666
|
-
|
|
1667
|
-
// Returns llama.cpp's system/feature summary as a JS string.
Napi::Value systemInfo(const Napi::CallbackInfo& info) {
    const char* sysInfoText = llama_print_system_info();
    return Napi::String::From(info.Env(), sysInfoText);
}
|
|
1670
|
-
|
|
1671
|
-
// Reports whether this build of llama.cpp supports GPU offloading.
Napi::Value addonGetSupportsGpuOffloading(const Napi::CallbackInfo& info) {
    const bool supported = llama_supports_gpu_offload();
    return Napi::Boolean::New(info.Env(), supported);
}
|
|
1674
|
-
|
|
1675
|
-
// Reports whether this build of llama.cpp supports memory-mapped model loading.
Napi::Value addonGetSupportsMmap(const Napi::CallbackInfo& info) {
    const bool supported = llama_supports_mmap();
    return Napi::Boolean::New(info.Env(), supported);
}
|
|
1678
|
-
|
|
1679
|
-
// Reports whether this build of llama.cpp supports locking model memory (mlock).
Napi::Value addonGetSupportsMlock(const Napi::CallbackInfo& info) {
    const bool supported = llama_supports_mlock();
    return Napi::Boolean::New(info.Env(), supported);
}
|
|
1682
|
-
|
|
1683
|
-
// Returns the block size for a given ggml type id, or undefined for an invalid id.
Napi::Value addonGetBlockSizeForGgmlType(const Napi::CallbackInfo& info) {
    const int ggmlType = info[0].As<Napi::Number>().Int32Value();

    // Valid type ids are [0, GGML_TYPE_COUNT); GGML_TYPE_COUNT itself is the
    // sentinel count, so `>=` is required to avoid an out-of-range type lookup.
    if (ggmlType < 0 || ggmlType >= GGML_TYPE_COUNT) {
        return info.Env().Undefined();
    }

    const auto blockSize = ggml_blck_size(static_cast<ggml_type>(ggmlType));

    return Napi::Number::New(info.Env(), blockSize);
}
|
|
1694
|
-
|
|
1695
|
-
// Returns the byte size of a given ggml type id, or undefined for an invalid id.
Napi::Value addonGetTypeSizeForGgmlType(const Napi::CallbackInfo& info) {
    const int ggmlType = info[0].As<Napi::Number>().Int32Value();

    // Valid type ids are [0, GGML_TYPE_COUNT); GGML_TYPE_COUNT itself is the
    // sentinel count, so `>=` is required to avoid an out-of-range type lookup.
    if (ggmlType < 0 || ggmlType >= GGML_TYPE_COUNT) {
        return info.Env().Undefined();
    }

    const auto typeSize = ggml_type_size(static_cast<ggml_type>(ggmlType));

    return Napi::Number::New(info.Env(), typeSize);
}
|
|
1706
|
-
|
|
1707
|
-
// Exposes compile-time ggml/llama constants to JS so the TS side can size
// buffers and validate parameters without hard-coding native values.
Napi::Value addonGetConsts(const Napi::CallbackInfo& info) {
    Napi::Object consts = Napi::Object::New(info.Env());
    consts.Set("ggmlMaxDims", Napi::Number::New(info.Env(), GGML_MAX_DIMS));
    consts.Set("ggmlTypeF16Size", Napi::Number::New(info.Env(), ggml_type_size(GGML_TYPE_F16)));
    consts.Set("ggmlTypeF32Size", Napi::Number::New(info.Env(), ggml_type_size(GGML_TYPE_F32)));
    consts.Set("ggmlTensorOverhead", Napi::Number::New(info.Env(), ggml_tensor_overhead()));
    consts.Set("llamaMaxRngState", Napi::Number::New(info.Env(), LLAMA_MAX_RNG_STATE));
    consts.Set("llamaPosSize", Napi::Number::New(info.Env(), sizeof(llama_pos)));
    consts.Set("llamaSeqIdSize", Napi::Number::New(info.Env(), sizeof(llama_seq_id)));

    return consts;
}
|
|
1719
|
-
|
|
1720
|
-
// Maps a ggml log level onto the addon's numeric scale
// (2 = error, 3 = warn, 4 = info, 5 = debug; 1 for anything else).
int addonGetGgmlLogLevelNumber(ggml_log_level level) {
    if (level == GGML_LOG_LEVEL_ERROR) {
        return 2;
    }
    if (level == GGML_LOG_LEVEL_WARN) {
        return 3;
    }
    if (level == GGML_LOG_LEVEL_INFO) {
        return 4;
    }
    if (level == GGML_LOG_LEVEL_DEBUG) {
        return 5;
    }

    return 1;
}
|
|
1730
|
-
|
|
1731
|
-
// Thread-safe-function trampoline: delivers a queued log record to the JS
// logger callback on the main thread, falling back to stdio if the callback
// is gone or throws. Owns `data` and frees it unconditionally at the end.
void addonCallJsLogCallback(
    Napi::Env env, Napi::Function callback, AddonThreadSafeLogCallbackFunctionContext* context, addon_logger_log* data
) {
    bool called = false;

    // env/callback may be null when the TSFN is being torn down.
    if (env != nullptr && callback != nullptr && addonJsLoggerCallbackSet) {
        try {
            callback.Call({
                Napi::Number::New(env, data->logLevelNumber),
                Napi::String::New(env, data->stringStream->str()),
            });
            called = true;
        } catch (const Napi::Error& e) {
            called = false;
        }
    }

    // Fallback: write the message directly so logs are never silently dropped.
    if (!called && data != nullptr) {
        if (data->logLevelNumber == 2) {
            // Level 2 is the error level — route to stderr.
            fputs(data->stringStream->str().c_str(), stderr);
            fflush(stderr);
        } else {
            fputs(data->stringStream->str().c_str(), stdout);
            fflush(stdout);
        }
    }

    // The record (and its stream) was heap-allocated by the producer; free it here.
    if (data != nullptr) {
        delete data->stringStream;
        delete data;
    }
}
|
|
1763
|
-
|
|
1764
|
-
// llama.cpp log hook (may be called from any thread). Filters by the configured
// log level, then forwards the message to the JS logger via the thread-safe
// function; falls back to stdio when no JS logger is set or queueing fails.
static void addonLlamaCppLogCallback(ggml_log_level level, const char* text, void* user_data) {
    int logLevelNumber = addonGetGgmlLogLevelNumber(level);

    // Higher numbers are less severe; skip messages above the configured threshold.
    if (logLevelNumber > addonLoggerLogLevel) {
        return;
    }

    if (addonJsLoggerCallbackSet) {
        std::stringstream* stringStream = new std::stringstream();
        if (text != nullptr) {
            *stringStream << text;
        }

        addon_logger_log* data = new addon_logger_log {
            logLevelNumber,
            stringStream,
        };

        auto status = addonThreadSafeLoggerCallback.NonBlockingCall(data);

        if (status == napi_ok) {
            // Ownership of `data` transferred to addonCallJsLogCallback.
            return;
        } else {
            delete stringStream;
            delete data;
        }
    }

    if (text != nullptr) {
        // Use the mapped level (2 = error) rather than the raw enum value,
        // consistent with the routing in addonCallJsLogCallback.
        if (logLevelNumber == 2) {
            fputs(text, stderr);
            fflush(stderr);
        } else {
            fputs(text, stdout);
            fflush(stdout);
        }
    }
}
|
|
1802
|
-
|
|
1803
|
-
// Installs (or removes) the JS logger callback.
// Called with a function: wraps it in a thread-safe function so native threads can log.
// Called with anything else: releases any previously installed logger.
Napi::Value setLogger(const Napi::CallbackInfo& info) {
    if (info.Length() < 1 || !info[0].IsFunction()) {
        if (addonJsLoggerCallbackSet) {
            addonJsLoggerCallbackSet = false;
            addonThreadSafeLoggerCallback.Release();
        }

        return info.Env().Undefined();
    }

    auto addonLoggerJSCallback = info[0].As<Napi::Function>();
    // Context keeps `this` alive for the lifetime of the TSFN; freed in the finalizer below.
    AddonThreadSafeLogCallbackFunctionContext* context = new Napi::Reference<Napi::Value>(Napi::Persistent(info.This()));
    addonThreadSafeLoggerCallback = AddonThreadSafeLogCallbackFunction::New(
        info.Env(),
        addonLoggerJSCallback,
        "loggerCallback",
        0,
        1,
        context,
        [](Napi::Env, void*, AddonThreadSafeLogCallbackFunctionContext* ctx) {
            addonJsLoggerCallbackSet = false;

            delete ctx;
        }
    );
    addonJsLoggerCallbackSet = true;

    // prevent blocking the main node process from exiting due to active resources
    addonThreadSafeLoggerCallback.Unref(info.Env());

    return info.Env().Undefined();
}
|
|
1835
|
-
|
|
1836
|
-
// Sets the numeric log-level threshold for the native logger.
// A missing or non-numeric argument resets the threshold to 5 (debug).
Napi::Value setLoggerLogLevel(const Napi::CallbackInfo& info) {
    const bool hasNumberArg = info.Length() >= 1 && info[0].IsNumber();

    addonLoggerLogLevel = hasNumberArg
        ? info[0].As<Napi::Number>().Int32Value()
        : 5;

    return info.Env().Undefined();
}
|
|
1847
|
-
|
|
1848
|
-
// Initializes the llama.cpp backend on a worker thread.
// If the addon was disposed while this worker was queued, the freshly
// initialized backend is freed immediately instead of being marked active.
class AddonBackendLoadWorker : public Napi::AsyncWorker {
    public:
        AddonBackendLoadWorker(const Napi::Env& env)
            : Napi::AsyncWorker(env, "AddonBackendLoadWorker"),
              deferred(Napi::Promise::Deferred::New(env)) {
        }
        ~AddonBackendLoadWorker() {
        }

        Napi::Promise GetPromise() {
            return deferred.Promise();
        }

    protected:
        Napi::Promise::Deferred deferred;

        void Execute() {
            try {
                llama_backend_init();

                // Nested try so a failure here is attributed to llama_backend_free,
                // not llama_backend_init.
                try {
                    if (backendDisposed) {
                        // Disposed while we were queued — undo the init right away.
                        llama_backend_free();
                    } else {
                        backendInitialized = true;
                    }
                } catch (const std::exception& e) {
                    SetError(e.what());
                } catch(...) {
                    SetError("Unknown error when calling \"llama_backend_free\"");
                }
            } catch (const std::exception& e) {
                SetError(e.what());
            } catch(...) {
                SetError("Unknown error when calling \"llama_backend_init\"");
            }
        }
        void OnOK() {
            deferred.Resolve(Env().Undefined());
        }
        void OnError(const Napi::Error& err) {
            deferred.Reject(err.Value());
        }
};
|
|
1892
|
-
|
|
1893
|
-
|
|
1894
|
-
// Frees the llama.cpp backend on a worker thread, if it was initialized.
class AddonBackendUnloadWorker : public Napi::AsyncWorker {
    public:
        AddonBackendUnloadWorker(const Napi::Env& env)
            : Napi::AsyncWorker(env, "AddonBackendUnloadWorker"),
              deferred(Napi::Promise::Deferred::New(env)) {
        }
        ~AddonBackendUnloadWorker() {
        }

        Napi::Promise GetPromise() {
            return deferred.Promise();
        }

    protected:
        Napi::Promise::Deferred deferred;

        void Execute() {
            try {
                if (backendInitialized) {
                    // Flip the flag before freeing so a concurrent check won't double-free.
                    backendInitialized = false;
                    llama_backend_free();
                }
            } catch (const std::exception& e) {
                SetError(e.what());
            } catch(...) {
                SetError("Unknown error when calling \"llama_backend_free\"");
            }
        }
        void OnOK() {
            deferred.Resolve(Env().Undefined());
        }
        void OnError(const Napi::Error& err) {
            deferred.Reject(err.Value());
        }
};
|
|
1929
|
-
|
|
1930
|
-
// Initializes the llama backend asynchronously.
// Resolves immediately if the backend is already initialized (idempotent).
Napi::Value addonInit(const Napi::CallbackInfo& info) {
    if (backendInitialized) {
        Napi::Promise::Deferred deferred = Napi::Promise::Deferred::New(info.Env());
        deferred.Resolve(info.Env().Undefined());
        return deferred.Promise();
    }

    AddonBackendLoadWorker* worker = new AddonBackendLoadWorker(info.Env());
    worker->Queue();
    return worker->GetPromise();
}
|
|
1941
|
-
|
|
1942
|
-
// Disposes the llama backend asynchronously.
// Resolves immediately if already disposed; otherwise marks the addon disposed
// up-front so a queued AddonBackendLoadWorker can see the flag.
Napi::Value addonDispose(const Napi::CallbackInfo& info) {
    if (backendDisposed) {
        Napi::Promise::Deferred deferred = Napi::Promise::Deferred::New(info.Env());
        deferred.Resolve(info.Env().Undefined());
        return deferred.Promise();
    }

    backendDisposed = true;

    AddonBackendUnloadWorker* worker = new AddonBackendUnloadWorker(info.Env());
    worker->Queue();
    return worker->GetPromise();
}
|
|
1955
|
-
|
|
1956
|
-
// Module finalizer: frees the llama backend when the addon is unloaded,
// unless it was already disposed explicitly via addonDispose().
static void addonFreeLlamaBackend(Napi::Env env, int* data) {
    if (backendDisposed) {
        return;
    }

    backendDisposed = true;
    if (backendInitialized) {
        backendInitialized = false;
        llama_backend_free();
    }
}
|
|
1967
|
-
|
|
1968
|
-
Napi::Object registerCallback(Napi::Env env, Napi::Object exports) {
|
|
1969
|
-
exports.DefineProperties({
|
|
1970
|
-
Napi::PropertyDescriptor::Function("systemInfo", systemInfo),
|
|
1971
|
-
Napi::PropertyDescriptor::Function("getSupportsGpuOffloading", addonGetSupportsGpuOffloading),
|
|
1972
|
-
Napi::PropertyDescriptor::Function("getSupportsMmap", addonGetSupportsMmap),
|
|
1973
|
-
Napi::PropertyDescriptor::Function("getSupportsMlock", addonGetSupportsMlock),
|
|
1974
|
-
Napi::PropertyDescriptor::Function("getBlockSizeForGgmlType", addonGetBlockSizeForGgmlType),
|
|
1975
|
-
Napi::PropertyDescriptor::Function("getTypeSizeForGgmlType", addonGetTypeSizeForGgmlType),
|
|
1976
|
-
Napi::PropertyDescriptor::Function("getConsts", addonGetConsts),
|
|
1977
|
-
Napi::PropertyDescriptor::Function("setLogger", setLogger),
|
|
1978
|
-
Napi::PropertyDescriptor::Function("setLoggerLogLevel", setLoggerLogLevel),
|
|
1979
|
-
Napi::PropertyDescriptor::Function("getGpuVramInfo", getGpuVramInfo),
|
|
1980
|
-
Napi::PropertyDescriptor::Function("getGpuDeviceInfo", getGpuDeviceInfo),
|
|
1981
|
-
Napi::PropertyDescriptor::Function("getGpuType", getGpuType),
|
|
1982
|
-
Napi::PropertyDescriptor::Function("init", addonInit),
|
|
1983
|
-
Napi::PropertyDescriptor::Function("dispose", addonDispose),
|
|
1984
|
-
});
|
|
1985
|
-
AddonModel::init(exports);
|
|
1986
|
-
AddonGrammar::init(exports);
|
|
1987
|
-
AddonGrammarEvaluationState::init(exports);
|
|
1988
|
-
AddonContext::init(exports);
|
|
1989
|
-
|
|
1990
|
-
llama_log_set(addonLlamaCppLogCallback, nullptr);
|
|
1991
|
-
|
|
1992
|
-
exports.AddFinalizer(addonFreeLlamaBackend, static_cast<int*>(nullptr));
|
|
1993
|
-
|
|
1994
|
-
return exports;
|
|
1995
|
-
}
|
|
1996
|
-
|
|
1997
|
-
// Node-API entry point: registers `registerCallback` as this native module's initializer.
NODE_API_MODULE(NODE_GYP_MODULE_NAME, registerCallback)
|