@fugood/llama.node 1.0.0-beta.2 → 1.0.0-beta.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CMakeLists.txt +10 -0
- package/package.json +21 -21
- package/scripts/{build.js → check.js} +7 -20
- package/src/llama.cpp/common/chat.cpp +0 -10
- package/src/llama.cpp/common/chat.h +9 -1
- package/src/llama.cpp/common/common.cpp +1 -0
- package/src/llama.cpp/common/common.h +1 -0
- package/src/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt +1 -1
package/CMakeLists.txt
CHANGED
@@ -35,6 +35,7 @@ string(REPLACE "arm64x" "arm64" ARCH ${ARCH})
 string(REPLACE "aarch64" "arm64" ARCH ${ARCH})
 
 option(TO_PACKAGE "Build as package" OFF)
+option(CLANG_USE_OMP "Use OpenMP in Clang" OFF)
 
 if(DEFINED VARIANT)
   set(VARIANT -${VARIANT})
@@ -96,6 +97,15 @@ if (VULKAN_SDK)
   find_package(Vulkan REQUIRED)
 endif()
 
+# Avoid libomp is not installed commonly
+if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang" AND NOT DEFINED GGML_OPENMP OR GGML_OPENMP AND NOT CLANG_USE_OMP)
+  find_package(OpenMP)
+  if (OpenMP_FOUND)
+    string(REPLACE "libomp" "libgomp" OpenMP_CXX_FLAGS "${OpenMP_CXX_FLAGS}")
+    string(REPLACE "libomp" "libgomp" OpenMP_C_FLAGS "${OpenMP_C_FLAGS}")
+  endif()
+endif()
+
 set(LLAMA_BUILD_COMMON ON CACHE BOOL "Build common")
 
 set(LLAMA_CURL OFF CACHE BOOL "Build curl")

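The new CLANG_USE_OMP option defaults to OFF, so Clang builds that enable OpenMP get their libomp flags rewritten to libgomp, which is more commonly installed. When building from source, the option can presumably be flipped through cmake-js, which accepts custom CMake cache defines via options prefixed with CD. A minimal sketch, assuming the --CD prefix and that you run it from the package root (the helper script itself is not part of the package):

// build-with-libomp.js (hypothetical helper, not shipped with the package).
// cmake-js forwards --CD<NAME>=<value> to CMake as -D<NAME>=<value>.
const { execSync } = require('child_process');

execSync('npx cmake-js build --CDCLANG_USE_OMP=ON', { stdio: 'inherit' });
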
package/package.json
CHANGED
@@ -1,15 +1,17 @@
 {
   "name": "@fugood/llama.node",
   "access": "public",
-  "version": "1.0.0-beta.2",
+  "version": "1.0.0-beta.4",
   "description": "An another Node binding of llama.cpp",
   "main": "lib/index.js",
   "scripts": {
-    "
+    "bootstrap": "npm install --omit=optional",
+    "postinstall": "node scripts/check.js",
     "pretest": "node scripts/download-test-models.js",
     "test": "jest",
-    "build": "
-    "
+    "build": "npx cmake-js build",
+    "build-js": "tsc",
+    "prepack": "npm run build-js",
     "prebuild-native": "node scripts/generate_win_dynamic_load.js 6",
     "build-native": "cmake-js compile",
     "clean": "rimraf build",
@@ -47,7 +49,7 @@
     ]
   },
   "files": [
-    "scripts/build.js",
+    "scripts/check.js",
     "scripts/llama.cpp.patch",
     "src/*.{cc,c,h,hpp}",
     "src/DecodeAudioTokenWorker.cpp",
@@ -68,19 +70,19 @@
     "CMakeLists.txt"
   ],
   "optionalDependencies": {
-    "@fugood/node-llama-linux-x64": "1.0.0-beta.2",
-    "@fugood/node-llama-linux-x64-vulkan": "1.0.0-beta.2",
-    "@fugood/node-llama-linux-x64-cuda": "1.0.0-beta.2",
-    "@fugood/node-llama-linux-arm64": "1.0.0-beta.2",
-    "@fugood/node-llama-linux-arm64-vulkan": "1.0.0-beta.2",
-    "@fugood/node-llama-linux-arm64-cuda": "1.0.0-beta.2",
-    "@fugood/node-llama-win32-x64": "1.0.0-beta.2",
-    "@fugood/node-llama-win32-x64-vulkan": "1.0.0-beta.2",
-    "@fugood/node-llama-win32-x64-cuda": "1.0.0-beta.2",
-    "@fugood/node-llama-win32-arm64": "1.0.0-beta.2",
-    "@fugood/node-llama-win32-arm64-vulkan": "1.0.0-beta.2",
-    "@fugood/node-llama-darwin-x64": "1.0.0-beta.2",
-    "@fugood/node-llama-darwin-arm64": "1.0.0-beta.2"
+    "@fugood/node-llama-linux-x64": "1.0.0-beta.4",
+    "@fugood/node-llama-linux-x64-vulkan": "1.0.0-beta.4",
+    "@fugood/node-llama-linux-x64-cuda": "1.0.0-beta.4",
+    "@fugood/node-llama-linux-arm64": "1.0.0-beta.4",
+    "@fugood/node-llama-linux-arm64-vulkan": "1.0.0-beta.4",
+    "@fugood/node-llama-linux-arm64-cuda": "1.0.0-beta.4",
+    "@fugood/node-llama-win32-x64": "1.0.0-beta.4",
+    "@fugood/node-llama-win32-x64-vulkan": "1.0.0-beta.4",
+    "@fugood/node-llama-win32-x64-cuda": "1.0.0-beta.4",
+    "@fugood/node-llama-win32-arm64": "1.0.0-beta.4",
+    "@fugood/node-llama-win32-arm64-vulkan": "1.0.0-beta.4",
+    "@fugood/node-llama-darwin-x64": "1.0.0-beta.4",
+    "@fugood/node-llama-darwin-arm64": "1.0.0-beta.4"
   },
   "devDependencies": {
     "@babel/preset-env": "^7.24.4",
@@ -92,14 +94,12 @@
     "cmake-js": "^7.3.0",
     "husky": "^9.0.11",
     "jest": "^29.7.0",
+    "node-addon-api": "^8.0.0",
     "release-it": "^17.7.0",
     "rimraf": "^6.0.1",
     "typescript": "^5.4.5",
     "wait-for-expect": "^3.0.2"
   },
-  "dependencies": {
-    "node-addon-api": "^8.0.0"
-  },
   "jest": {
     "testEnvironment": "node",
     "moduleFileExtensions": [

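The prebuilt platform binaries stay as optionalDependencies pinned to the package version, node-addon-api moves into devDependencies, and a new postinstall hook delegates to scripts/check.js (diffed below). Install-time choices reach that script through npm config flags, which npm exposes to lifecycle scripts as npm_config_* environment variables. A minimal sketch of that mechanism, assuming the flag names this package reads:

// Invoked by npm as a lifecycle script, e.g.:
//   npm install @fugood/llama.node --accelerator=vulkan --build-from-source
// npm lowercases CLI flags and exposes them to scripts as npm_config_* env vars.
console.log(process.env.npm_config_accelerator);        // e.g. "vulkan" (or undefined)
console.log(process.env.npm_config_build_from_source);  // e.g. "true" (or undefined)
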
package/scripts/{build.js → check.js}
RENAMED

@@ -1,31 +1,18 @@
-const fs = require('fs');
 const path = require('path');
 
 const validAccelerators = process.platform === 'darwin' ? [] : ['vulkan', 'cuda'];
 
-
+const accelerator = process.env.npm_config_accelerator || '';
 
-
-
-
-
-    __dirname,
-    `../node-llama-${process.platform}-${process.arch}${accelerator ? `-${accelerator}` : ''}`
-  ),
-  path.resolve(__dirname, `../build/Release/index.node`),
-];
-
-if (!isBuildFromSource && !checkPaths.some(path => fs.existsSync(path))) {
-  console.warn('Not found prebuild package, please build from source');
-  isBuildFromSource = true;
+if (process.env.npm_config_build_from_source) {
+  console.log('Build from source is enabled');
+} else {
+  process.exit(0);
 }
 
 if (accelerator && !validAccelerators.includes(accelerator)) {
-
-
-
-  if (isBuildFromSource) {
-    console.log('Build from source is enabled');
+  console.error(`Invalid accelerator: ${accelerator}`);
+  process.exit(1);
 }
 
 let BuildSystem;

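Read together, the renamed script drops the filesystem probe for prebuilt binaries and trusts npm's optional-dependency resolution instead: unless --build-from-source is passed, the postinstall check exits immediately. A consolidated sketch of the new flow, assembled from the hunk above (lines outside the hunk, such as the cmake-js build driver, are not shown in this diff and are only summarized in comments):

// postinstall entry point (scripts/check.js), as reconstructed from the diff above.
const path = require('path'); // presumably used later in the script, outside this hunk

const validAccelerators = process.platform === 'darwin' ? [] : ['vulkan', 'cuda'];
const accelerator = process.env.npm_config_accelerator || '';

if (process.env.npm_config_build_from_source) {
  console.log('Build from source is enabled');
} else {
  // A prebuilt platform package from optionalDependencies is assumed to be installed.
  process.exit(0);
}

if (accelerator && !validAccelerators.includes(accelerator)) {
  console.error(`Invalid accelerator: ${accelerator}`);
  process.exit(1);
}

let BuildSystem;
// ...the remainder (outside this hunk) presumably drives a cmake-js source build.
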
package/src/llama.cpp/common/chat.cpp
CHANGED

@@ -1,8 +1,6 @@
 #include "chat.h"
 #include "json-schema-to-grammar.h"
 #include "log.h"
-#include "minja/chat-template.hpp"
-#include "minja/minja.hpp"
 
 #include <optional>
 
@@ -15,14 +13,6 @@ static std::string format_time(const std::chrono::system_clock::time_point & now
     return res;
 }
 
-typedef minja::chat_template common_chat_template;
-
-struct common_chat_templates {
-    bool has_explicit_template; // Model had builtin template or template overridde was specified.
-    std::unique_ptr<common_chat_template> template_default; // always set (defaults to chatml)
-    std::unique_ptr<common_chat_template> template_tool_use;
-};
-
 struct templates_params {
     json messages;
     json tools;

package/src/llama.cpp/common/chat.h
CHANGED

@@ -6,8 +6,16 @@
 #include <chrono>
 #include <string>
 #include <vector>
+#include "minja/chat-template.hpp"
+#include "minja/minja.hpp"
 
-
+typedef minja::chat_template common_chat_template;
+
+struct common_chat_templates {
+    bool has_explicit_template; // Model had builtin template or template overridde was specified.
+    std::unique_ptr<common_chat_template> template_default; // always set (defaults to chatml)
+    std::unique_ptr<common_chat_template> template_tool_use;
+};
 
 struct common_chat_tool_call {
     std::string name;

package/src/llama.cpp/common/common.cpp
CHANGED

@@ -1081,6 +1081,7 @@ struct llama_model_params common_model_params_to_llama(common_params & params) {
         mparams.n_gpu_layers = params.n_gpu_layers;
     }
 
+    mparams.vocab_only = params.vocab_only;
     mparams.main_gpu = params.main_gpu;
     mparams.split_mode = params.split_mode;
     mparams.tensor_split = params.tensor_split;

package/src/llama.cpp/common/common.h
CHANGED

@@ -219,6 +219,7 @@ enum common_reasoning_format {
 };
 
 struct common_params {
+    bool vocab_only = false;
     int32_t n_predict = -1; // new tokens to predict
     int32_t n_ctx = 4096; // context size
     int32_t n_batch = 2048; // logical batch size for prompt processing (must be >=32 to use BLAS)

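The new vocab_only flag on common_params is forwarded into llama_model_params, so the common loader can open a GGUF with only its vocabulary/tokenizer and skip the weight tensors. How this surfaces in the Node API is not part of this diff; the sketch below is purely illustrative, and the loadModel/tokenize names and the JS-level vocab_only option are assumptions:

// Hypothetical usage sketch only; the JS-level API shape is assumed,
// not confirmed by this diff.
const { loadModel } = require('@fugood/llama.node');

async function main() {
  // vocab_only: load the tokenizer without the weight tensors (assumed option name,
  // mirroring the common_params.vocab_only field added above).
  const ctx = await loadModel({ model: 'path/to/model.gguf', vocab_only: true });
  console.log(await ctx.tokenize('Hello, world!'));
}

main();
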
package/src/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt
CHANGED

@@ -90,7 +90,7 @@ function(ggml_add_cpu_backend_variant_impl tag_name)
         message(STATUS "ARM detected")
 
         if (MSVC AND NOT CMAKE_C_COMPILER_ID STREQUAL "Clang")
-
+            list(APPEND ARCH_FLAGS /arch:armv8.7)
         else()
             check_cxx_compiler_flag(-mfp16-format=ieee GGML_COMPILER_SUPPORTS_FP16_FORMAT_I3E)
             if (NOT "${GGML_COMPILER_SUPPORTS_FP16_FORMAT_I3E}" STREQUAL "")