@fugood/llama.node 1.0.0-beta.2 → 1.0.0-beta.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,15 +1,17 @@
 {
   "name": "@fugood/llama.node",
   "access": "public",
-  "version": "1.0.0-beta.2",
+  "version": "1.0.0-beta.3",
   "description": "An another Node binding of llama.cpp",
   "main": "lib/index.js",
   "scripts": {
-    "postinstall": "node scripts/build.js",
+    "bootstrap": "npm install --omit=optional",
+    "postinstall": "node scripts/check.js",
     "pretest": "node scripts/download-test-models.js",
     "test": "jest",
-    "build": "tsc",
-    "prepack": "npm run build",
+    "build": "npx cmake-js build",
+    "build-js": "tsc",
+    "prepack": "npm run build-js",
     "prebuild-native": "node scripts/generate_win_dynamic_load.js 6",
     "build-native": "cmake-js compile",
     "clean": "rimraf build",
@@ -47,7 +49,7 @@
     ]
   },
   "files": [
-    "scripts/build.js",
+    "scripts/check.js",
     "scripts/llama.cpp.patch",
     "src/*.{cc,c,h,hpp}",
     "src/DecodeAudioTokenWorker.cpp",
@@ -68,19 +70,19 @@
     "CMakeLists.txt"
   ],
   "optionalDependencies": {
-    "@fugood/node-llama-linux-x64": "1.0.0-beta.2",
-    "@fugood/node-llama-linux-x64-vulkan": "1.0.0-beta.2",
-    "@fugood/node-llama-linux-x64-cuda": "1.0.0-beta.2",
-    "@fugood/node-llama-linux-arm64": "1.0.0-beta.2",
-    "@fugood/node-llama-linux-arm64-vulkan": "1.0.0-beta.2",
-    "@fugood/node-llama-linux-arm64-cuda": "1.0.0-beta.2",
-    "@fugood/node-llama-win32-x64": "1.0.0-beta.2",
-    "@fugood/node-llama-win32-x64-vulkan": "1.0.0-beta.2",
-    "@fugood/node-llama-win32-x64-cuda": "1.0.0-beta.2",
-    "@fugood/node-llama-win32-arm64": "1.0.0-beta.2",
-    "@fugood/node-llama-win32-arm64-vulkan": "1.0.0-beta.2",
-    "@fugood/node-llama-darwin-x64": "1.0.0-beta.2",
-    "@fugood/node-llama-darwin-arm64": "1.0.0-beta.2"
+    "@fugood/node-llama-linux-x64": "1.0.0-beta.3",
+    "@fugood/node-llama-linux-x64-vulkan": "1.0.0-beta.3",
+    "@fugood/node-llama-linux-x64-cuda": "1.0.0-beta.3",
+    "@fugood/node-llama-linux-arm64": "1.0.0-beta.3",
+    "@fugood/node-llama-linux-arm64-vulkan": "1.0.0-beta.3",
+    "@fugood/node-llama-linux-arm64-cuda": "1.0.0-beta.3",
+    "@fugood/node-llama-win32-x64": "1.0.0-beta.3",
+    "@fugood/node-llama-win32-x64-vulkan": "1.0.0-beta.3",
+    "@fugood/node-llama-win32-x64-cuda": "1.0.0-beta.3",
+    "@fugood/node-llama-win32-arm64": "1.0.0-beta.3",
+    "@fugood/node-llama-win32-arm64-vulkan": "1.0.0-beta.3",
+    "@fugood/node-llama-darwin-x64": "1.0.0-beta.3",
+    "@fugood/node-llama-darwin-arm64": "1.0.0-beta.3"
   },
   "devDependencies": {
     "@babel/preset-env": "^7.24.4",
@@ -92,14 +94,12 @@
     "cmake-js": "^7.3.0",
     "husky": "^9.0.11",
     "jest": "^29.7.0",
+    "node-addon-api": "^8.0.0",
     "release-it": "^17.7.0",
     "rimraf": "^6.0.1",
     "typescript": "^5.4.5",
     "wait-for-expect": "^3.0.2"
   },
-  "dependencies": {
-    "node-addon-api": "^8.0.0"
-  },
   "jest": {
     "testEnvironment": "node",
     "moduleFileExtensions": [
@@ -1,31 +1,18 @@
-const fs = require('fs');
 const path = require('path');
 
 const validAccelerators = process.platform === 'darwin' ? [] : ['vulkan', 'cuda'];
 
-let isBuildFromSource = process.env.npm_config_build_from_source === 'true';
+const accelerator = process.env.npm_config_accelerator || '';
 
-let accelerator = process.env.npm_config_accelerator || '';
-
-const checkPaths = [
-  path.resolve(
-    __dirname,
-    `../node-llama-${process.platform}-${process.arch}${accelerator ? `-${accelerator}` : ''}`
-  ),
-  path.resolve(__dirname, `../build/Release/index.node`),
-];
-
-if (!isBuildFromSource && !checkPaths.some(path => fs.existsSync(path))) {
-  console.warn('Not found prebuild package, please build from source');
-  isBuildFromSource = true;
+if (process.env.npm_config_build_from_source) {
+  console.log('Build from source is enabled');
+} else {
+  process.exit(0);
 }
 
 if (accelerator && !validAccelerators.includes(accelerator)) {
-  throw new Error(`Invalid accelerator: ${accelerator}`);
-}
-
-if (isBuildFromSource) {
-  console.log('Build from source is enabled');
+  console.error(`Invalid accelerator: ${accelerator}`);
+  process.exit(1);
 }
 
 let BuildSystem;
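
The rewritten script leans on npm's standard behavior of exposing CLI flags to lifecycle scripts as npm_config_* environment variables: by default it exits 0 (a prebuilt binary is used) and only falls through to the cmake-js build when a source build was requested. A minimal sketch of that contract, assuming an invocation like `npm install --build-from-source --accelerator=vulkan`:

    // Illustrative restatement of the rewritten check above: npm maps
    // --build-from-source and --accelerator=vulkan to these env vars.
    const buildFromSource = process.env.npm_config_build_from_source; // 'true'
    const accel = process.env.npm_config_accelerator || '';          // 'vulkan'
    if (!buildFromSource) process.exit(0); // prebuilt path: nothing left to do
    console.log(`Build from source is enabled${accel ? ` (${accel})` : ''}`);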
@@ -1,8 +1,6 @@
 #include "chat.h"
 #include "json-schema-to-grammar.h"
 #include "log.h"
-#include "minja/chat-template.hpp"
-#include "minja/minja.hpp"
 
 #include <optional>
 
@@ -15,14 +13,6 @@ static std::string format_time(const std::chrono::system_clock::time_point & now
     return res;
 }
 
-typedef minja::chat_template common_chat_template;
-
-struct common_chat_templates {
-    bool has_explicit_template; // Model had builtin template or template overridde was specified.
-    std::unique_ptr<common_chat_template> template_default; // always set (defaults to chatml)
-    std::unique_ptr<common_chat_template> template_tool_use;
-};
-
 struct templates_params {
     json messages;
     json tools;
@@ -6,8 +6,16 @@
 #include <chrono>
 #include <string>
 #include <vector>
+#include "minja/chat-template.hpp"
+#include "minja/minja.hpp"
 
-struct common_chat_templates;
+typedef minja::chat_template common_chat_template;
+
+struct common_chat_templates {
+    bool has_explicit_template; // Model had builtin template or template overridde was specified.
+    std::unique_ptr<common_chat_template> template_default; // always set (defaults to chatml)
+    std::unique_ptr<common_chat_template> template_tool_use;
+};
 
 struct common_chat_tool_call {
     std::string name;
@@ -1081,6 +1081,7 @@ struct llama_model_params common_model_params_to_llama(common_params & params) {
         mparams.n_gpu_layers = params.n_gpu_layers;
     }
 
+    mparams.vocab_only = params.vocab_only;
     mparams.main_gpu = params.main_gpu;
     mparams.split_mode = params.split_mode;
     mparams.tensor_split = params.tensor_split;
@@ -219,6 +219,7 @@ enum common_reasoning_format {
 };
 
 struct common_params {
+    bool vocab_only = false;
     int32_t n_predict = -1; // new tokens to predict
     int32_t n_ctx = 4096; // context size
     int32_t n_batch = 2048; // logical batch size for prompt processing (must be >=32 to use BLAS)
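
The new vocab_only flag is forwarded into llama_model_params.vocab_only (see the common_model_params_to_llama hunk above), which in llama.cpp loads only the vocabulary/tokenizer and skips the tensor weights, making tokenization cheap. This diff does not show how the JS API surfaces it, so every name in the sketch below (loadModel, the vocab_only option, tokenize) should be read as an assumption:

    // Hypothetical sketch: assumes the binding forwards a vocab_only option
    // through to common_params.vocab_only; all names here are assumptions.
    const { loadModel } = require('@fugood/llama.node');

    async function tokenizeWithoutWeights(ggufPath, text) {
      const model = await loadModel({ model: ggufPath, vocab_only: true });
      return model.tokenize(text); // tokenizer available; inference is not
    }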
@@ -90,7 +90,7 @@ function(ggml_add_cpu_backend_variant_impl tag_name)
         message(STATUS "ARM detected")
 
         if (MSVC AND NOT CMAKE_C_COMPILER_ID STREQUAL "Clang")
-            message(FATAL_ERROR "MSVC is not supported for ARM, use clang")
+            list(APPEND ARCH_FLAGS /arch:armv8.7)
         else()
             check_cxx_compiler_flag(-mfp16-format=ieee GGML_COMPILER_SUPPORTS_FP16_FORMAT_I3E)
             if (NOT "${GGML_COMPILER_SUPPORTS_FP16_FORMAT_I3E}" STREQUAL "")