@fugood/llama.node 0.0.1-alpha.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (204)
  1. package/CMakeLists.txt +85 -0
  2. package/README.md +56 -0
  3. package/bin/darwin/arm64/llama-node.node +0 -0
  4. package/bin/darwin/x64/llama-node.node +0 -0
  5. package/bin/linux/arm64/llama-node.node +0 -0
  6. package/bin/linux/x64/llama-node.node +0 -0
  7. package/bin/win32/arm64/llama-node.node +0 -0
  8. package/bin/win32/arm64/node.lib +0 -0
  9. package/bin/win32/x64/llama-node.node +0 -0
  10. package/bin/win32/x64/node.lib +0 -0
  11. package/lib/binding.js +13 -0
  12. package/lib/binding.ts +57 -0
  13. package/lib/index.js +24 -0
  14. package/lib/index.ts +13 -0
  15. package/package.json +65 -0
  16. package/src/addons.cpp +506 -0
  17. package/src/llama.cpp/CMakeLists.txt +1320 -0
  18. package/src/llama.cpp/build.zig +172 -0
  19. package/src/llama.cpp/cmake/FindSIMD.cmake +100 -0
  20. package/src/llama.cpp/common/CMakeLists.txt +87 -0
  21. package/src/llama.cpp/common/base64.hpp +392 -0
  22. package/src/llama.cpp/common/common.cpp +2949 -0
  23. package/src/llama.cpp/common/common.h +324 -0
  24. package/src/llama.cpp/common/console.cpp +501 -0
  25. package/src/llama.cpp/common/console.h +19 -0
  26. package/src/llama.cpp/common/grammar-parser.cpp +440 -0
  27. package/src/llama.cpp/common/grammar-parser.h +29 -0
  28. package/src/llama.cpp/common/json-schema-to-grammar.cpp +764 -0
  29. package/src/llama.cpp/common/json-schema-to-grammar.h +4 -0
  30. package/src/llama.cpp/common/json.hpp +24766 -0
  31. package/src/llama.cpp/common/log.h +724 -0
  32. package/src/llama.cpp/common/ngram-cache.cpp +282 -0
  33. package/src/llama.cpp/common/ngram-cache.h +94 -0
  34. package/src/llama.cpp/common/sampling.cpp +353 -0
  35. package/src/llama.cpp/common/sampling.h +147 -0
  36. package/src/llama.cpp/common/stb_image.h +8396 -0
  37. package/src/llama.cpp/common/train.cpp +1513 -0
  38. package/src/llama.cpp/common/train.h +233 -0
  39. package/src/llama.cpp/examples/CMakeLists.txt +52 -0
  40. package/src/llama.cpp/examples/baby-llama/CMakeLists.txt +5 -0
  41. package/src/llama.cpp/examples/baby-llama/baby-llama.cpp +1640 -0
  42. package/src/llama.cpp/examples/batched/CMakeLists.txt +5 -0
  43. package/src/llama.cpp/examples/batched/batched.cpp +262 -0
  44. package/src/llama.cpp/examples/batched-bench/CMakeLists.txt +5 -0
  45. package/src/llama.cpp/examples/batched-bench/batched-bench.cpp +261 -0
  46. package/src/llama.cpp/examples/beam-search/CMakeLists.txt +5 -0
  47. package/src/llama.cpp/examples/beam-search/beam-search.cpp +188 -0
  48. package/src/llama.cpp/examples/benchmark/CMakeLists.txt +6 -0
  49. package/src/llama.cpp/examples/benchmark/benchmark-matmult.cpp +275 -0
  50. package/src/llama.cpp/examples/convert-llama2c-to-ggml/CMakeLists.txt +5 -0
  51. package/src/llama.cpp/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +936 -0
  52. package/src/llama.cpp/examples/embedding/CMakeLists.txt +5 -0
  53. package/src/llama.cpp/examples/embedding/embedding.cpp +211 -0
  54. package/src/llama.cpp/examples/eval-callback/CMakeLists.txt +9 -0
  55. package/src/llama.cpp/examples/eval-callback/eval-callback.cpp +195 -0
  56. package/src/llama.cpp/examples/export-lora/CMakeLists.txt +5 -0
  57. package/src/llama.cpp/examples/export-lora/export-lora.cpp +462 -0
  58. package/src/llama.cpp/examples/finetune/CMakeLists.txt +5 -0
  59. package/src/llama.cpp/examples/finetune/finetune.cpp +1861 -0
  60. package/src/llama.cpp/examples/gbnf-validator/CMakeLists.txt +5 -0
  61. package/src/llama.cpp/examples/gbnf-validator/gbnf-validator.cpp +132 -0
  62. package/src/llama.cpp/examples/gguf/CMakeLists.txt +5 -0
  63. package/src/llama.cpp/examples/gguf/gguf.cpp +256 -0
  64. package/src/llama.cpp/examples/gguf-split/CMakeLists.txt +5 -0
  65. package/src/llama.cpp/examples/gguf-split/gguf-split.cpp +553 -0
  66. package/src/llama.cpp/examples/gritlm/CMakeLists.txt +5 -0
  67. package/src/llama.cpp/examples/gritlm/gritlm.cpp +215 -0
  68. package/src/llama.cpp/examples/imatrix/CMakeLists.txt +5 -0
  69. package/src/llama.cpp/examples/imatrix/imatrix.cpp +655 -0
  70. package/src/llama.cpp/examples/infill/CMakeLists.txt +5 -0
  71. package/src/llama.cpp/examples/infill/infill.cpp +767 -0
  72. package/src/llama.cpp/examples/jeopardy/questions.txt +100 -0
  73. package/src/llama.cpp/examples/llama-bench/CMakeLists.txt +5 -0
  74. package/src/llama.cpp/examples/llama-bench/llama-bench.cpp +1286 -0
  75. package/src/llama.cpp/examples/llama.android/app/src/main/cpp/CMakeLists.txt +50 -0
  76. package/src/llama.cpp/examples/llama.android/app/src/main/cpp/llama-android.cpp +443 -0
  77. package/src/llama.cpp/examples/llava/CMakeLists.txt +37 -0
  78. package/src/llama.cpp/examples/llava/clip.cpp +2027 -0
  79. package/src/llama.cpp/examples/llava/clip.h +85 -0
  80. package/src/llama.cpp/examples/llava/llava-cli.cpp +309 -0
  81. package/src/llama.cpp/examples/llava/llava.cpp +426 -0
  82. package/src/llama.cpp/examples/llava/llava.h +50 -0
  83. package/src/llama.cpp/examples/llava/requirements.txt +3 -0
  84. package/src/llama.cpp/examples/lookahead/CMakeLists.txt +5 -0
  85. package/src/llama.cpp/examples/lookahead/lookahead.cpp +485 -0
  86. package/src/llama.cpp/examples/lookup/CMakeLists.txt +23 -0
  87. package/src/llama.cpp/examples/lookup/lookup-create.cpp +41 -0
  88. package/src/llama.cpp/examples/lookup/lookup-merge.cpp +47 -0
  89. package/src/llama.cpp/examples/lookup/lookup-stats.cpp +160 -0
  90. package/src/llama.cpp/examples/lookup/lookup.cpp +258 -0
  91. package/src/llama.cpp/examples/main/CMakeLists.txt +5 -0
  92. package/src/llama.cpp/examples/main/main.cpp +957 -0
  93. package/src/llama.cpp/examples/main-cmake-pkg/CMakeLists.txt +33 -0
  94. package/src/llama.cpp/examples/parallel/CMakeLists.txt +5 -0
  95. package/src/llama.cpp/examples/parallel/parallel.cpp +427 -0
  96. package/src/llama.cpp/examples/passkey/CMakeLists.txt +5 -0
  97. package/src/llama.cpp/examples/passkey/passkey.cpp +302 -0
  98. package/src/llama.cpp/examples/perplexity/CMakeLists.txt +5 -0
  99. package/src/llama.cpp/examples/perplexity/perplexity.cpp +1943 -0
  100. package/src/llama.cpp/examples/quantize/CMakeLists.txt +6 -0
  101. package/src/llama.cpp/examples/quantize/quantize.cpp +423 -0
  102. package/src/llama.cpp/examples/quantize-stats/CMakeLists.txt +6 -0
  103. package/src/llama.cpp/examples/quantize-stats/quantize-stats.cpp +424 -0
  104. package/src/llama.cpp/examples/retrieval/CMakeLists.txt +5 -0
  105. package/src/llama.cpp/examples/retrieval/retrieval.cpp +350 -0
  106. package/src/llama.cpp/examples/save-load-state/CMakeLists.txt +5 -0
  107. package/src/llama.cpp/examples/save-load-state/save-load-state.cpp +246 -0
  108. package/src/llama.cpp/examples/server/CMakeLists.txt +40 -0
  109. package/src/llama.cpp/examples/server/bench/requirements.txt +2 -0
  110. package/src/llama.cpp/examples/server/httplib.h +9465 -0
  111. package/src/llama.cpp/examples/server/server.cpp +3826 -0
  112. package/src/llama.cpp/examples/server/tests/requirements.txt +6 -0
  113. package/src/llama.cpp/examples/server/utils.hpp +653 -0
  114. package/src/llama.cpp/examples/simple/CMakeLists.txt +5 -0
  115. package/src/llama.cpp/examples/simple/simple.cpp +183 -0
  116. package/src/llama.cpp/examples/speculative/CMakeLists.txt +5 -0
  117. package/src/llama.cpp/examples/speculative/speculative.cpp +614 -0
  118. package/src/llama.cpp/examples/sycl/CMakeLists.txt +9 -0
  119. package/src/llama.cpp/examples/sycl/ls-sycl-device.cpp +13 -0
  120. package/src/llama.cpp/examples/tokenize/CMakeLists.txt +5 -0
  121. package/src/llama.cpp/examples/tokenize/tokenize.cpp +42 -0
  122. package/src/llama.cpp/examples/train-text-from-scratch/CMakeLists.txt +5 -0
  123. package/src/llama.cpp/examples/train-text-from-scratch/train-text-from-scratch.cpp +1252 -0
  124. package/src/llama.cpp/ggml-alloc.c +985 -0
  125. package/src/llama.cpp/ggml-alloc.h +76 -0
  126. package/src/llama.cpp/ggml-backend-impl.h +141 -0
  127. package/src/llama.cpp/ggml-backend.c +2099 -0
  128. package/src/llama.cpp/ggml-backend.h +233 -0
  129. package/src/llama.cpp/ggml-common.h +1853 -0
  130. package/src/llama.cpp/ggml-cuda.h +43 -0
  131. package/src/llama.cpp/ggml-impl.h +265 -0
  132. package/src/llama.cpp/ggml-kompute.cpp +2006 -0
  133. package/src/llama.cpp/ggml-kompute.h +46 -0
  134. package/src/llama.cpp/ggml-metal.h +66 -0
  135. package/src/llama.cpp/ggml-mpi.c +216 -0
  136. package/src/llama.cpp/ggml-mpi.h +39 -0
  137. package/src/llama.cpp/ggml-opencl.cpp +2301 -0
  138. package/src/llama.cpp/ggml-opencl.h +36 -0
  139. package/src/llama.cpp/ggml-quants.c +12678 -0
  140. package/src/llama.cpp/ggml-quants.h +133 -0
  141. package/src/llama.cpp/ggml-sycl.cpp +17882 -0
  142. package/src/llama.cpp/ggml-sycl.h +49 -0
  143. package/src/llama.cpp/ggml-vulkan-shaders.hpp +69849 -0
  144. package/src/llama.cpp/ggml-vulkan.cpp +6442 -0
  145. package/src/llama.cpp/ggml-vulkan.h +29 -0
  146. package/src/llama.cpp/ggml.c +21819 -0
  147. package/src/llama.cpp/ggml.h +2403 -0
  148. package/src/llama.cpp/llama.cpp +17468 -0
  149. package/src/llama.cpp/llama.h +1117 -0
  150. package/src/llama.cpp/pocs/CMakeLists.txt +12 -0
  151. package/src/llama.cpp/pocs/vdot/CMakeLists.txt +9 -0
  152. package/src/llama.cpp/pocs/vdot/q8dot.cpp +172 -0
  153. package/src/llama.cpp/pocs/vdot/vdot.cpp +310 -0
  154. package/src/llama.cpp/prompts/LLM-questions.txt +49 -0
  155. package/src/llama.cpp/prompts/alpaca.txt +1 -0
  156. package/src/llama.cpp/prompts/assistant.txt +31 -0
  157. package/src/llama.cpp/prompts/chat-with-baichuan.txt +4 -0
  158. package/src/llama.cpp/prompts/chat-with-bob.txt +7 -0
  159. package/src/llama.cpp/prompts/chat-with-qwen.txt +1 -0
  160. package/src/llama.cpp/prompts/chat-with-vicuna-v0.txt +7 -0
  161. package/src/llama.cpp/prompts/chat-with-vicuna-v1.txt +7 -0
  162. package/src/llama.cpp/prompts/chat.txt +28 -0
  163. package/src/llama.cpp/prompts/dan-modified.txt +1 -0
  164. package/src/llama.cpp/prompts/dan.txt +1 -0
  165. package/src/llama.cpp/prompts/mnemonics.txt +93 -0
  166. package/src/llama.cpp/prompts/parallel-questions.txt +43 -0
  167. package/src/llama.cpp/prompts/reason-act.txt +18 -0
  168. package/src/llama.cpp/requirements/requirements-convert-hf-to-gguf.txt +3 -0
  169. package/src/llama.cpp/requirements/requirements-convert-llama-ggml-to-gguf.txt +1 -0
  170. package/src/llama.cpp/requirements/requirements-convert-lora-to-ggml.txt +2 -0
  171. package/src/llama.cpp/requirements/requirements-convert-persimmon-to-gguf.txt +2 -0
  172. package/src/llama.cpp/requirements/requirements-convert.txt +5 -0
  173. package/src/llama.cpp/requirements.txt +12 -0
  174. package/src/llama.cpp/scripts/gen-build-info-cpp.cmake +24 -0
  175. package/src/llama.cpp/scripts/xxd.cmake +16 -0
  176. package/src/llama.cpp/sgemm.cpp +999 -0
  177. package/src/llama.cpp/sgemm.h +12 -0
  178. package/src/llama.cpp/tests/CMakeLists.txt +78 -0
  179. package/src/llama.cpp/tests/get-model.cpp +21 -0
  180. package/src/llama.cpp/tests/get-model.h +2 -0
  181. package/src/llama.cpp/tests/test-autorelease.cpp +24 -0
  182. package/src/llama.cpp/tests/test-backend-ops.cpp +2266 -0
  183. package/src/llama.cpp/tests/test-c.c +7 -0
  184. package/src/llama.cpp/tests/test-chat-template.cpp +107 -0
  185. package/src/llama.cpp/tests/test-double-float.cpp +57 -0
  186. package/src/llama.cpp/tests/test-grad0.cpp +1606 -0
  187. package/src/llama.cpp/tests/test-grammar-integration.cpp +243 -0
  188. package/src/llama.cpp/tests/test-grammar-parser.cpp +250 -0
  189. package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +899 -0
  190. package/src/llama.cpp/tests/test-llama-grammar.cpp +402 -0
  191. package/src/llama.cpp/tests/test-model-load-cancel.cpp +27 -0
  192. package/src/llama.cpp/tests/test-opt.cpp +181 -0
  193. package/src/llama.cpp/tests/test-quantize-fns.cpp +185 -0
  194. package/src/llama.cpp/tests/test-quantize-perf.cpp +363 -0
  195. package/src/llama.cpp/tests/test-rope.cpp +221 -0
  196. package/src/llama.cpp/tests/test-sampling.cpp +301 -0
  197. package/src/llama.cpp/tests/test-tokenizer-0-falcon.cpp +187 -0
  198. package/src/llama.cpp/tests/test-tokenizer-0-llama.cpp +190 -0
  199. package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp +123 -0
  200. package/src/llama.cpp/tests/test-tokenizer-1-llama.cpp +111 -0
  201. package/src/llama.cpp/unicode-data.cpp +1651 -0
  202. package/src/llama.cpp/unicode-data.h +16 -0
  203. package/src/llama.cpp/unicode.cpp +277 -0
  204. package/src/llama.cpp/unicode.h +28 -0
package/CMakeLists.txt ADDED
@@ -0,0 +1,85 @@
+ cmake_minimum_required(VERSION 3.15)
+ cmake_policy(SET CMP0091 NEW)
+ cmake_policy(SET CMP0042 NEW)
+
+ project (llama-node)
+
+ set(CMAKE_CXX_STANDARD 17)
+
+ if(NOT DEFINED napi_build_version)
+   set(napi_build_version 6)
+ endif()
+ add_definitions(-DNAPI_VERSION=${napi_build_version})
+ message(STATUS "NAPI_VERSION: ${napi_build_version}")
+
+ set(CMAKE_SYSTEM_PROCESSOR ${NODE_ARCH})
+ string(TOLOWER ${CMAKE_SYSTEM_NAME} PLATFORM)
+ string(TOLOWER ${CMAKE_SYSTEM_PROCESSOR} ARCH)
+
+ # normalize platform to nodejs
+ string(REPLACE "windows" "win32" PLATFORM ${PLATFORM})
+
+ # normalize arch to nodejs: 'arm', 'arm64', 'ia32', 'loong64', 'mips', 'mipsel', 'ppc', 'ppc64', 'riscv64', 's390', 's390x', and 'x64'.
+ string(REPLACE "amd64" "x64" ARCH ${ARCH})
+ string(REPLACE "x86_64" "x64" ARCH ${ARCH})
+ string(REPLACE "i686" "ia32" ARCH ${ARCH})
+ string(REPLACE "i386" "ia32" ARCH ${ARCH})
+ string(REPLACE "armv7l" "arm" ARCH ${ARCH})
+ string(REPLACE "arm" "arm" ARCH ${ARCH})
+ string(REPLACE "arm64ex" "arm64" ARCH ${ARCH})
+ string(REPLACE "aarch64" "arm64" ARCH ${ARCH})
+
+ if(DEFINED VARIANT)
+   set(VARIANT -${VARIANT})
+ else()
+   set(VARIANT "")
+ endif()
+
+ set(PLATFORM_BINARY_DIR ${CMAKE_SOURCE_DIR}/bin/${PLATFORM}${VARIANT}/${ARCH})
+
+ message(STATUS "Build type: ${CMAKE_BUILD_TYPE}")
+ message(STATUS "Platform: ${PLATFORM}")
+ message(STATUS "Architecture: ${ARCH}")
+ message(STATUS "PLATFORM_BINARY_DIR: ${PLATFORM_BINARY_DIR}")
+
+ # set strip flags
+ if(CMAKE_BUILD_TYPE STREQUAL "Release")
+   if(UNIX OR MINGW)
+     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -s")
+     set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -s")
+   elseif(MSVC)
+     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /s")
+     set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /s")
+   endif()
+ endif()
+
+ include_directories(${CMAKE_JS_INC})
+
+ # flags: -fPIC
+ set(CMAKE_POSITION_INDEPENDENT_CODE ON)
+
+ set(LLAMA_STATIC ON CACHE BOOL "Build llama as static library")
+ add_subdirectory("src/llama.cpp")
+
+ file(GLOB SOURCE_FILES "src/addons.cpp")
+
+ add_library(${PROJECT_NAME} SHARED ${SOURCE_FILES} ${CMAKE_JS_SRC})
+ set_target_properties(${PROJECT_NAME} PROPERTIES PREFIX "" SUFFIX ".node")
+ target_link_libraries(${PROJECT_NAME} ${CMAKE_JS_LIB} llama ggml common)
+
+ if(MSVC AND CMAKE_JS_NODELIB_DEF AND CMAKE_JS_NODELIB_TARGET)
+   # Generate node.lib
+   execute_process(COMMAND ${CMAKE_AR} /def:${CMAKE_JS_NODELIB_DEF} /out:${CMAKE_JS_NODELIB_TARGET} ${CMAKE_STATIC_LINKER_FLAGS})
+   # copy target to bin folder
+   get_filename_component(CMAKE_JS_NODELIB_TARGET_NAME ${CMAKE_JS_NODELIB_TARGET} NAME)
+   add_custom_command(TARGET ${PROJECT_NAME} POST_BUILD
+     COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_JS_NODELIB_TARGET} ${PLATFORM_BINARY_DIR}/${CMAKE_JS_NODELIB_TARGET_NAME}
+     COMMENT "Copying to bin folder"
+   )
+ endif()
+
+ # copy target to bin folder
+ add_custom_command(TARGET ${PROJECT_NAME} POST_BUILD
+   COMMAND ${CMAKE_COMMAND} -E copy $<TARGET_FILE:${PROJECT_NAME}> ${PLATFORM_BINARY_DIR}/$<TARGET_FILE_NAME:${PROJECT_NAME}>
+   COMMENT "Copying to bin folder"
+ )
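
The normalized `PLATFORM`/`ARCH` values intentionally mirror Node's `process.platform`/`process.arch`, so the directory the CMake script copies into is exactly the one the JS loader (`lib/binding.js` below) will `require` at runtime. A minimal sketch of the shared path convention; the variant name here is a hypothetical example, not a variant this package is known to ship:

```ts
// Mirrors bin/${PLATFORM}${VARIANT}/${ARCH} from the CMake script above.
const binaryPath = (variant?: string): string =>
  `bin/${process.platform}${variant ? `-${variant}` : ''}/${process.arch}/llama-node.node`

console.log(binaryPath())          // e.g. bin/darwin/arm64/llama-node.node
console.log(binaryPath('vulkan'))  // hypothetical variant build directory
```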
package/README.md ADDED
@@ -0,0 +1,56 @@
+ # llama.node
+
+ Node binding for [llama.cpp](https://github.com/ggerganov/llama.cpp).
+
+ [llama.cpp](https://github.com/ggerganov/llama.cpp): Inference of [LLaMA](https://arxiv.org/abs/2302.13971) model in pure C/C++
+
+ ## Installation
+
+ ```sh
+ npm install @fugood/llama.node
+ ```
+
+ ## Usage
+
+ ```js
+ import { loadModel } from '@fugood/llama.node'
+
+ // Initialize a Llama context with the model (may take a while)
+ const context = loadModel({
+   model: 'path/to/gguf/model',
+   use_mlock: true,
+   n_ctx: 2048,
+   n_gpu_layers: 1, // > 0: enable Metal on macOS
+   // embedding: true, // use embedding
+ })
+
+ // Do completion
+ const { text, timings } = await context.completion(
+   {
+     prompt: 'This is a conversation between user and llama, a friendly chatbot. respond in simple markdown.\n\nUser: Hello!\nLlama:',
+     n_predict: 100,
+     stop: ['</s>', 'Llama:', 'User:'],
+     // n_threads: 4,
+   },
+   (data) => {
+     // This is a partial completion callback
+     const { token } = data
+   },
+ )
+ console.log('Result:', text)
+ ```
+
+ ## License
+
+ MIT
+
+ ---
+
+ <p align="center">
+   <a href="https://bricks.tools">
+     <img width="90px" src="https://avatars.githubusercontent.com/u/17320237?s=200&v=4">
+   </a>
+   <p align="center">
+     Built and maintained by <a href="https://bricks.tools">BRICKS</a>.
+   </p>
+ </p>
package/bin/darwin/arm64/llama-node.node ADDED (binary file)
package/bin/darwin/x64/llama-node.node ADDED (binary file)
package/bin/linux/arm64/llama-node.node ADDED (binary file)
package/bin/linux/x64/llama-node.node ADDED (binary file)
package/bin/win32/arm64/llama-node.node ADDED (binary file)
package/bin/win32/arm64/node.lib ADDED (binary file)
package/bin/win32/x64/llama-node.node ADDED (binary file)
package/bin/win32/x64/node.lib ADDED (binary file)
package/lib/binding.js ADDED
@@ -0,0 +1,13 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.loadModule = void 0;
+ const loadModule = (variant) => {
+     try {
+         if (variant) {
+             return require(`../bin/${process.platform}-${variant}/${process.arch}/llama-node.node`);
+         }
+     }
+     catch (_a) { } // ignore errors and try the common path
+     return require(`../bin/${process.platform}/${process.arch}/llama-node.node`);
+ };
+ exports.loadModule = loadModule;
package/lib/binding.ts ADDED
@@ -0,0 +1,57 @@
+ export type LlamaModelOptions = {
+   model: string
+   embedding?: boolean
+   n_ctx?: number
+   n_batch?: number
+   n_threads?: number
+   n_gpu_layers?: number
+   use_mlock?: boolean
+   use_mmap?: boolean
+ }
+
+ export type LlamaCompletionOptions = {
+   prompt: string
+   n_samples?: number
+   temperature?: number
+   top_k?: number
+   top_p?: number
+   repetition_penalty?: number
+   n_predict?: number
+   max_length?: number
+   max_tokens?: number
+   seed?: number
+   stop?: string[]
+ }
+
+ export type LlamaCompletionResult = {
+   text: string
+   tokens_predicted: number
+   tokens_evaluated: number
+   truncated: boolean
+ }
+
+ export type LlamaCompletionToken = {
+   token: string
+ }
+
+ export interface LlamaContext {
+   new (options: LlamaModelOptions): LlamaContext
+   getSystemInfo(): string
+   completion(options: LlamaCompletionOptions, callback?: (token: LlamaCompletionToken) => void): Promise<LlamaCompletionResult>
+   stopCompletion(): void
+   saveSession(path: string): Promise<void>
+   loadSession(path: string): Promise<void>
+ }
+
+ export interface Module {
+   LlamaContext: LlamaContext
+ }
+
+ export const loadModule = (variant?: string): Module => {
+   try {
+     if (variant) {
+       return require(`../bin/${process.platform}-${variant}/${process.arch}/llama-node.node`) as Module
+     }
+   } catch {} // ignore errors and try the common path
+   return require(`../bin/${process.platform}/${process.arch}/llama-node.node`) as Module
+ }
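
The typings above define the full surface of the native addon. A minimal sketch of driving it directly through `loadModule` rather than the `loadModel` helper (see `lib/index.ts` below); the model and session paths are placeholders, not files shipped with the package:

```ts
import { loadModule } from '@fugood/llama.node'

// Resolve the prebuilt binary for this platform/arch; pass a variant
// name only if a variant-specific binary is actually shipped.
const { LlamaContext } = loadModule()

// './model.gguf' is a hypothetical local path.
const context = new LlamaContext({ model: './model.gguf', n_ctx: 2048 })
console.log(context.getSystemInfo())

const result = await context.completion(
  { prompt: 'Q: What is llama.cpp?\nA:', n_predict: 64, stop: ['\n'] },
  ({ token }) => process.stdout.write(token), // streamed partial tokens
)
console.log('\ntruncated:', result.truncated)

// Persist and restore session state between runs.
await context.saveSession('./session.bin')
await context.loadSession('./session.bin')
```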
package/lib/index.js ADDED
@@ -0,0 +1,24 @@
+ "use strict";
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+     if (k2 === undefined) k2 = k;
+     var desc = Object.getOwnPropertyDescriptor(m, k);
+     if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+         desc = { enumerable: true, get: function() { return m[k]; } };
+     }
+     Object.defineProperty(o, k2, desc);
+ }) : (function(o, m, k, k2) {
+     if (k2 === undefined) k2 = k;
+     o[k2] = m[k];
+ }));
+ var __exportStar = (this && this.__exportStar) || function(m, exports) {
+     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.loadModel = void 0;
+ const binding_1 = require("./binding");
+ __exportStar(require("./binding"), exports);
+ const loadModel = (options) => {
+     const { LlamaContext } = (0, binding_1.loadModule)(options.lib_variant);
+     return new LlamaContext(options);
+ };
+ exports.loadModel = loadModel;
package/lib/index.ts ADDED
@@ -0,0 +1,13 @@
+ import { loadModule, LlamaModelOptions } from './binding'
+ import type { LlamaContext } from './binding'
+
+ export * from './binding'
+
+ export interface LlamaModelOptionsExtended extends LlamaModelOptions {
+   lib_variant?: string
+ }
+
+ export const loadModel = (options: LlamaModelOptionsExtended): LlamaContext => {
+   const { LlamaContext } = loadModule(options.lib_variant)
+   return new LlamaContext(options)
+ }
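
`loadModel` is a thin wrapper that threads `lib_variant` through to `loadModule` before constructing the context, so variant selection and model options travel in one object. A small sketch under the assumption that a variant-specific binary exists; the variant name and model path are hypothetical:

```ts
import { loadModel } from '@fugood/llama.node'

// 'vulkan' is a hypothetical variant: it selects
// bin/<platform>-vulkan/<arch>/llama-node.node if present, and
// loadModule silently falls back to the common binary otherwise.
const context = loadModel({
  model: './model.gguf', // placeholder path
  n_ctx: 2048,
  lib_variant: 'vulkan',
})
```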
package/package.json ADDED
@@ -0,0 +1,65 @@
+ {
+   "name": "@fugood/llama.node",
+   "access": "public",
+   "version": "0.0.1-alpha.1",
+   "description": "Llama.cpp for Node.js",
+   "main": "lib/index.js",
+   "scripts": {
+     "test": "jest",
+     "build": "tsc",
+     "prepack": "yarn build",
+     "build-native": "cmake-js compile",
+     "clean": "rimraf build",
+     "prepare": "husky",
+     "commitlint": "commitlint --edit"
+   },
+   "repository": {
+     "type": "git",
+     "url": "git+https://github.com/mybigday/llama.node.git"
+   },
+   "keywords": [
+     "llama",
+     "llm",
+     "ai",
+     "genai"
+   ],
+   "author": "Hans <hans.chen@bricks.tools>",
+   "license": "MIT",
+   "bugs": {
+     "url": "https://github.com/mybigday/llama.node/issues"
+   },
+   "homepage": "https://github.com/mybigday/llama.node#readme",
+   "publishConfig": {
+     "registry": "https://registry.npmjs.org"
+   },
+   "binary": {
+     "napi_versions": [
+       5
+     ]
+   },
+   "files": [
+     "bin/**/*",
+     "scripts/*.js",
+     "scripts/*.ts",
+     "src/**/*.{c,cc,cpp,h,hh,hpp,txt,cmake}",
+     "lib/*.js",
+     "lib/*.ts",
+     "CMakeLists.txt"
+   ],
+   "devDependencies": {
+     "@babel/preset-env": "^7.24.4",
+     "@babel/preset-typescript": "^7.24.1",
+     "@commitlint/cli": "^19.3.0",
+     "@commitlint/config-conventional": "^19.2.2",
+     "@types/jest": "^29.5.12",
+     "@types/node": "^20.12.7",
+     "cmake-js": "^7.3.0",
+     "husky": "^9.0.11",
+     "jest": "^29.7.0",
+     "rimraf": "^5.0.5",
+     "typescript": "^5.4.5"
+   },
+   "dependencies": {
+     "node-addon-api": "^8.0.0"
+   }
+ }