@fugood/llama.node 0.3.10 → 0.3.12
This diff shows the content of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between versions as they appear in the public registry.
- package/bin/darwin/arm64/llama-node.node +0 -0
- package/bin/darwin/x64/llama-node.node +0 -0
- package/bin/linux/arm64/llama-node.node +0 -0
- package/bin/linux/x64/llama-node.node +0 -0
- package/bin/linux-cuda/arm64/llama-node.node +0 -0
- package/bin/linux-cuda/x64/llama-node.node +0 -0
- package/bin/linux-vulkan/arm64/llama-node.node +0 -0
- package/bin/linux-vulkan/x64/llama-node.node +0 -0
- package/bin/win32/arm64/llama-node.node +0 -0
- package/bin/win32/arm64/node.lib +0 -0
- package/bin/win32/x64/llama-node.node +0 -0
- package/bin/win32/x64/node.lib +0 -0
- package/bin/win32-vulkan/arm64/llama-node.node +0 -0
- package/bin/win32-vulkan/arm64/node.lib +0 -0
- package/bin/win32-vulkan/x64/llama-node.node +0 -0
- package/bin/win32-vulkan/x64/node.lib +0 -0
- package/lib/binding.ts +1 -0
- package/lib/index.js +27 -1
- package/lib/index.ts +32 -0
- package/package.json +5 -3
- package/src/LlamaContext.cpp +48 -4
- package/src/LlamaContext.h +1 -0
package/bin/darwin/arm64/llama-node.node
CHANGED
Binary file

package/bin/darwin/x64/llama-node.node
CHANGED
Binary file

package/bin/linux/arm64/llama-node.node
CHANGED
Binary file

package/bin/linux/x64/llama-node.node
CHANGED
Binary file

package/bin/linux-cuda/arm64/llama-node.node
CHANGED
Binary file

package/bin/linux-cuda/x64/llama-node.node
CHANGED
Binary file

package/bin/linux-vulkan/arm64/llama-node.node
CHANGED
Binary file

package/bin/linux-vulkan/x64/llama-node.node
CHANGED
Binary file

package/bin/win32/arm64/llama-node.node
CHANGED
Binary file

package/bin/win32/arm64/node.lib
CHANGED
Binary file

package/bin/win32/x64/llama-node.node
CHANGED
Binary file

package/bin/win32/x64/node.lib
CHANGED
Binary file

package/bin/win32-vulkan/arm64/llama-node.node
CHANGED
Binary file

package/bin/win32-vulkan/arm64/node.lib
CHANGED
Binary file

package/bin/win32-vulkan/x64/llama-node.node
CHANGED
Binary file

package/bin/win32-vulkan/x64/node.lib
CHANGED
Binary file
package/lib/binding.ts
CHANGED

@@ -150,6 +150,7 @@ export interface LlamaContext {
   getLoadedLoraAdapters(): { path: string; scaled: number }[]
   // static
   loadModelInfo(path: string, skip: string[]): Promise<Object>
+  toggleNativeLog(enable: boolean, callback: (level: string, text: string) => void): void
 }

 export interface Module {
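The new interface member corresponds to a static method registered on the native LlamaContext class (see src/LlamaContext.cpp below). As a minimal sketch, not code from the package, this is how the raw binding could be driven directly, assuming a Module obtained from the internal loadModule helper that lib/index.ts imports from ./binding:

    import { loadModule } from './binding' // package-internal helper

    const mod = await loadModule('default')
    // Forward native llama.cpp logs to JS; pass false to restore the default logger.
    mod.LlamaContext.toggleNativeLog(true, (level, text) => {
      console.log(`[${level}] ${text}`)
    })

In practice the wrappers exported from lib/index.ts (below) manage this call automatically.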
package/lib/index.js
CHANGED

@@ -23,14 +23,39 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, ge
     });
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.loadLlamaModelInfo = exports.initLlama = exports.loadModel = void 0;
+exports.loadLlamaModelInfo = exports.initLlama = exports.loadModel = exports.toggleNativeLog = void 0;
+exports.addNativeLogListener = addNativeLogListener;
 const binding_1 = require("./binding");
 __exportStar(require("./binding"), exports);
 const mods = {};
+const logListeners = [];
+const logCallback = (level, text) => {
+    logListeners.forEach((listener) => listener(level, text));
+};
+let logEnabled = false;
+const refreshNativeLogSetup = () => {
+    Object.entries(mods).forEach(([, mod]) => {
+        mod.LlamaContext.toggleNativeLog(logEnabled, logCallback);
+    });
+};
+const toggleNativeLog = (enable) => __awaiter(void 0, void 0, void 0, function* () {
+    logEnabled = enable;
+    refreshNativeLogSetup();
+});
+exports.toggleNativeLog = toggleNativeLog;
+function addNativeLogListener(listener) {
+    logListeners.push(listener);
+    return {
+        remove: () => {
+            logListeners.splice(logListeners.indexOf(listener), 1);
+        },
+    };
+}
 const loadModel = (options) => __awaiter(void 0, void 0, void 0, function* () {
     var _a, _b;
     const variant = (_a = options.lib_variant) !== null && _a !== void 0 ? _a : 'default';
     (_b = mods[variant]) !== null && _b !== void 0 ? _b : (mods[variant] = yield (0, binding_1.loadModule)(options.lib_variant));
+    refreshNativeLogSetup();
     return new mods[variant].LlamaContext(options);
 });
 exports.loadModel = loadModel;

@@ -46,6 +71,7 @@ const loadLlamaModelInfo = (path) => __awaiter(void 0, void 0, void 0, function*
     var _a;
     const variant = 'default';
     (_a = mods[variant]) !== null && _a !== void 0 ? _a : (mods[variant] = yield (0, binding_1.loadModule)(variant));
+    refreshNativeLogSetup();
     return mods[variant].LlamaContext.loadModelInfo(path, modelInfoSkip);
 });
 exports.loadLlamaModelInfo = loadLlamaModelInfo;
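For CommonJS consumers, the compiled entry point now exposes toggleNativeLog and addNativeLogListener alongside loadModel. A hedged usage sketch; the model path and option values are placeholders rather than values from this diff:

    const { loadModel, toggleNativeLog, addNativeLogListener } = require('@fugood/llama.node')

    async function main() {
      await toggleNativeLog(true)
      const sub = addNativeLogListener((level, text) => {
        process.stdout.write(`[llama:${level}] ${text}`)
      })
      const ctx = await loadModel({ model: './model.gguf' }) // placeholder options
      // ... use ctx ...
      sub.remove() // detach this listener; native logging itself stays enabled
      await toggleNativeLog(false)
    }

    main()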
package/lib/index.ts
CHANGED

@@ -9,11 +9,42 @@ export interface LlamaModelOptionsExtended extends LlamaModelOptions {

 const mods: { [key: string]: Module } = {}

+const logListeners: Array<(level: string, text: string) => void> = []
+
+const logCallback = (level: string, text: string) => {
+  logListeners.forEach((listener) => listener(level, text))
+}
+
+let logEnabled = false
+
+const refreshNativeLogSetup = () => {
+  Object.entries(mods).forEach(([, mod]) => {
+    mod.LlamaContext.toggleNativeLog(logEnabled, logCallback)
+  })
+}
+
+export const toggleNativeLog = async (enable: boolean) => {
+  logEnabled = enable
+  refreshNativeLogSetup()
+}
+
+export function addNativeLogListener(
+  listener: (level: string, text: string) => void,
+): { remove: () => void } {
+  logListeners.push(listener)
+  return {
+    remove: () => {
+      logListeners.splice(logListeners.indexOf(listener), 1)
+    },
+  }
+}
+
 export const loadModel = async (
   options: LlamaModelOptionsExtended,
 ): Promise<LlamaContext> => {
   const variant = options.lib_variant ?? 'default'
   mods[variant] ??= await loadModule(options.lib_variant)
+  refreshNativeLogSetup()
   return new mods[variant].LlamaContext(options)
 }

@@ -30,5 +61,6 @@ const modelInfoSkip = [
 export const loadLlamaModelInfo = async (path: string): Promise<Object> => {
   const variant = 'default'
   mods[variant] ??= await loadModule(variant)
+  refreshNativeLogSetup()
   return mods[variant].LlamaContext.loadModelInfo(path, modelInfoSkip)
 }
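Note the refreshNativeLogSetup() call added to both loaders: each lib_variant loads a separate native module, so the current log state must be re-applied whenever a new module comes up. The listener list itself is process-wide, meaning every variant funnels through the single shared logCallback. A sketch of the resulting behavior, where 'vulkan' is a hypothetical variant name used only for illustration:

    import { loadModel, toggleNativeLog, addNativeLogListener } from '@fugood/llama.node'

    await toggleNativeLog(true)
    addNativeLogListener((level, text) => console.log(level, text))
    const a = await loadModel({ model: './m.gguf' })                        // default variant
    const b = await loadModel({ model: './m.gguf', lib_variant: 'vulkan' }) // hypothetical
    // Logs from both native modules reach the same listener.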
package/package.json
CHANGED

@@ -1,8 +1,8 @@
 {
   "name": "@fugood/llama.node",
   "access": "public",
-  "version": "0.3.10",
-  "description": "
+  "version": "0.3.12",
+  "description": "An another Node binding of llama.cpp",
   "main": "lib/index.js",
   "scripts": {
     "test": "jest",

@@ -22,7 +22,9 @@
     "llama",
     "llm",
     "ai",
-    "genai"
+    "genai",
+    "Local LLM",
+    "llama.cpp"
   ],
   "author": "Hans <hans.chen@bricks.tools>",
   "license": "MIT",
package/src/LlamaContext.cpp
CHANGED

@@ -120,6 +120,9 @@ void LlamaContext::Init(Napi::Env env, Napi::Object &exports) {
            "release", static_cast<napi_property_attributes>(napi_enumerable)),
        StaticMethod<&LlamaContext::ModelInfo>(
            "loadModelInfo",
+           static_cast<napi_property_attributes>(napi_enumerable)),
+       StaticMethod<&LlamaContext::ToggleNativeLog>(
+           "toggleNativeLog",
            static_cast<napi_property_attributes>(napi_enumerable))});
   Napi::FunctionReference *constructor = new Napi::FunctionReference();
   *constructor = Napi::Persistent(func);
@@ -278,6 +281,46 @@ bool validateModelChatTemplate(const struct llama_model * model, const bool use_
   return common_chat_verify_template(tmpl, use_jinja);
 }

+static Napi::FunctionReference _log_callback;
+
+// toggleNativeLog(enable: boolean, callback: (log: string) => void): void
+void LlamaContext::ToggleNativeLog(const Napi::CallbackInfo &info) {
+  bool enable = info[0].ToBoolean().Value();
+  if (enable) {
+    _log_callback.Reset(info[1].As<Napi::Function>());
+
+    llama_log_set([](ggml_log_level level, const char * text, void * user_data) {
+      llama_log_callback_default(level, text, user_data);
+
+      std::string level_str = "";
+      if (level == GGML_LOG_LEVEL_ERROR) {
+        level_str = "error";
+      } else if (level == GGML_LOG_LEVEL_INFO) {
+        level_str = "info";
+      } else if (level == GGML_LOG_LEVEL_WARN) {
+        level_str = "warn";
+      }
+
+      if (_log_callback.IsEmpty()) {
+        return;
+      }
+      try {
+        Napi::Env env = _log_callback.Env();
+        Napi::HandleScope scope(env);
+        _log_callback.Call({
+          Napi::String::New(env, level_str),
+          Napi::String::New(env, text)
+        });
+      } catch (const std::exception &e) {
+        // printf("Error calling log callback: %s\n", e.what());
+      }
+    }, nullptr);
+  } else {
+    _log_callback.Reset();
+    llama_log_set(llama_log_callback_default, nullptr);
+  }
+}
+
 // getModelInfo(): object
 Napi::Value LlamaContext::GetModelInfo(const Napi::CallbackInfo &info) {
   char desc[1024];
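The first argument delivered to the JS callback mirrors the mapping above: 'error', 'info', or 'warn', with an empty string for any other ggml level (e.g. GGML_LOG_LEVEL_DEBUG). Note that the (log: string) => void signature in the C++ comment is stale; the call actually passes (level, text), matching lib/binding.ts. A filtering sketch:

    import { addNativeLogListener } from '@fugood/llama.node'

    addNativeLogListener((level, text) => {
      if (level === 'error') process.stderr.write(text)
      else process.stdout.write(text) // 'info', 'warn', and '' (unmapped levels)
    })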
@@ -312,8 +355,7 @@ Napi::Value LlamaContext::GetModelInfo(const Napi::CallbackInfo &info) {
   defaultCaps.Set("parallelToolCalls", _templates.template_default->original_caps().supports_parallel_tool_calls);
   defaultCaps.Set("toolCallId", _templates.template_default->original_caps().supports_tool_call_id);
   minja.Set("defaultCaps", defaultCaps);
-
-  toolUse.Set("toolUse", validateModelChatTemplate(model, true, "tool_use"));
+  minja.Set("toolUse", validateModelChatTemplate(model, true, "tool_use"));
   if (_templates.template_tool_use) {
     Napi::Object toolUseCaps = Napi::Object::New(info.Env());
     toolUseCaps.Set("tools", _templates.template_tool_use->original_caps().supports_tools);

@@ -322,13 +364,15 @@ Napi::Value LlamaContext::GetModelInfo(const Napi::CallbackInfo &info) {
     toolUseCaps.Set("systemRole", _templates.template_tool_use->original_caps().supports_system_role);
     toolUseCaps.Set("parallelToolCalls", _templates.template_tool_use->original_caps().supports_parallel_tool_calls);
     toolUseCaps.Set("toolCallId", _templates.template_tool_use->original_caps().supports_tool_call_id);
-
+    minja.Set("toolUseCaps", toolUseCaps);
   }
-  minja.Set("toolUse", toolUse);
   chatTemplates.Set("minja", minja);
   details.Set("chatTemplates", chatTemplates);

   details.Set("metadata", metadata);
+
+  // Deprecated: use chatTemplates.llamaChat instead
+  details.Set("isChatTemplateSupported", validateModelChatTemplate(_sess->model(), false, ""));
   return details;
 }

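Beyond logging, the GetModelInfo changes fix the tool-use reporting: toolUse is now a boolean set directly on the minja object (previously it was set on a separate toolUse object that was attached under minja wholesale), toolUseCaps is now actually attached when a tool_use template exists, and the deprecated top-level isChatTemplateSupported flag is restored. A hedged TypeScript sketch of the resulting shape, listing only fields visible in this diff with the nesting inferred:

    interface ModelInfoSketch {
      isChatTemplateSupported: boolean // deprecated: use chatTemplates.llamaChat instead
      chatTemplates: {
        minja: {
          toolUse: boolean
          defaultCaps: {
            tools: boolean
            systemRole: boolean
            parallelToolCalls: boolean
            toolCallId: boolean
          }
          toolUseCaps?: {
            tools: boolean
            systemRole: boolean
            parallelToolCalls: boolean
            toolCallId: boolean
          } // only when a tool_use template exists
        }
      }
      metadata: Object
    }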
package/src/LlamaContext.h
CHANGED

@@ -5,6 +5,7 @@ class LlamaCompletionWorker;
 class LlamaContext : public Napi::ObjectWrap<LlamaContext> {
 public:
   LlamaContext(const Napi::CallbackInfo &info);
+  static void ToggleNativeLog(const Napi::CallbackInfo &info);
   static Napi::Value ModelInfo(const Napi::CallbackInfo& info);
   static void Init(Napi::Env env, Napi::Object &exports);
