@fugood/llama.node 0.2.3 → 0.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (319)
  1. package/CMakeLists.txt +6 -3
  2. package/bin/darwin/arm64/llama-node.node +0 -0
  3. package/bin/darwin/x64/llama-node.node +0 -0
  4. package/bin/linux/arm64/llama-node.node +0 -0
  5. package/bin/linux/x64/llama-node.node +0 -0
  6. package/bin/linux-vulkan/arm64/llama-node.node +0 -0
  7. package/bin/linux-vulkan/x64/llama-node.node +0 -0
  8. package/bin/win32/arm64/llama-node.node +0 -0
  9. package/bin/win32/arm64/node.lib +0 -0
  10. package/bin/win32/x64/llama-node.node +0 -0
  11. package/bin/win32/x64/node.lib +0 -0
  12. package/bin/win32-vulkan/arm64/llama-node.node +0 -0
  13. package/bin/win32-vulkan/arm64/node.lib +0 -0
  14. package/bin/win32-vulkan/x64/llama-node.node +0 -0
  15. package/bin/win32-vulkan/x64/node.lib +0 -0
  16. package/lib/binding.ts +8 -1
  17. package/package.json +3 -3
  18. package/patches/llama.patch +12 -12
  19. package/src/DetokenizeWorker.cpp +1 -1
  20. package/src/LlamaContext.cpp +33 -1
  21. package/src/LlamaContext.h +1 -0
  22. package/src/llama.cpp/.github/workflows/bench.yml +310 -0
  23. package/src/llama.cpp/.github/workflows/build.yml +1315 -0
  24. package/src/llama.cpp/.github/workflows/close-issue.yml +23 -0
  25. package/src/llama.cpp/.github/workflows/docker.yml +116 -0
  26. package/src/llama.cpp/.github/workflows/editorconfig.yml +27 -0
  27. package/src/llama.cpp/.github/workflows/gguf-publish.yml +44 -0
  28. package/src/llama.cpp/.github/workflows/labeler.yml +17 -0
  29. package/src/llama.cpp/.github/workflows/nix-ci-aarch64.yml +65 -0
  30. package/src/llama.cpp/.github/workflows/nix-ci.yml +72 -0
  31. package/src/llama.cpp/.github/workflows/nix-flake-update.yml +22 -0
  32. package/src/llama.cpp/.github/workflows/nix-publish-flake.yml +36 -0
  33. package/src/llama.cpp/.github/workflows/python-check-requirements.yml +35 -0
  34. package/src/llama.cpp/.github/workflows/python-lint.yml +23 -0
  35. package/src/llama.cpp/.github/workflows/python-type-check.yml +38 -0
  36. package/src/llama.cpp/.github/workflows/server.yml +183 -0
  37. package/src/llama.cpp/CMakeLists.txt +91 -1245
  38. package/src/llama.cpp/cmake/arm64-windows-llvm.cmake +1 -1
  39. package/src/llama.cpp/cmake/build-info.cmake +58 -0
  40. package/src/llama.cpp/cmake/git-vars.cmake +22 -0
  41. package/src/llama.cpp/common/CMakeLists.txt +4 -3
  42. package/src/llama.cpp/common/build-info.cpp.in +4 -0
  43. package/src/llama.cpp/common/common.cpp +1116 -877
  44. package/src/llama.cpp/common/common.h +191 -77
  45. package/src/llama.cpp/common/grammar-parser.cpp +118 -31
  46. package/src/llama.cpp/common/json-schema-to-grammar.cpp +346 -65
  47. package/src/llama.cpp/common/log.h +1 -1
  48. package/src/llama.cpp/common/ngram-cache.h +10 -3
  49. package/src/llama.cpp/common/sampling.cpp +19 -10
  50. package/src/llama.cpp/docs/build.md +353 -0
  51. package/src/llama.cpp/examples/CMakeLists.txt +22 -22
  52. package/src/llama.cpp/examples/baby-llama/CMakeLists.txt +1 -1
  53. package/src/llama.cpp/examples/baby-llama/baby-llama.cpp +6 -6
  54. package/src/llama.cpp/examples/batched/CMakeLists.txt +1 -1
  55. package/src/llama.cpp/examples/batched/batched.cpp +52 -55
  56. package/src/llama.cpp/examples/batched-bench/CMakeLists.txt +1 -1
  57. package/src/llama.cpp/examples/batched-bench/batched-bench.cpp +20 -72
  58. package/src/llama.cpp/examples/benchmark/CMakeLists.txt +1 -1
  59. package/src/llama.cpp/examples/chat-13B.bat +57 -0
  60. package/src/llama.cpp/examples/convert-llama2c-to-ggml/CMakeLists.txt +1 -1
  61. package/src/llama.cpp/examples/{finetune → cvector-generator}/CMakeLists.txt +2 -2
  62. package/src/llama.cpp/examples/cvector-generator/completions.txt +582 -0
  63. package/src/llama.cpp/examples/cvector-generator/cvector-generator.cpp +503 -0
  64. package/src/llama.cpp/examples/cvector-generator/mean.hpp +48 -0
  65. package/src/llama.cpp/examples/cvector-generator/negative.txt +4 -0
  66. package/src/llama.cpp/examples/cvector-generator/pca.hpp +325 -0
  67. package/src/llama.cpp/examples/cvector-generator/positive.txt +4 -0
  68. package/src/llama.cpp/examples/deprecation-warning/deprecation-warning.cpp +35 -0
  69. package/src/llama.cpp/examples/embedding/CMakeLists.txt +1 -1
  70. package/src/llama.cpp/examples/embedding/embedding.cpp +94 -46
  71. package/src/llama.cpp/examples/eval-callback/CMakeLists.txt +2 -2
  72. package/src/llama.cpp/examples/eval-callback/eval-callback.cpp +4 -6
  73. package/src/llama.cpp/examples/export-lora/CMakeLists.txt +1 -1
  74. package/src/llama.cpp/examples/export-lora/export-lora.cpp +344 -386
  75. package/src/llama.cpp/examples/gbnf-validator/CMakeLists.txt +2 -2
  76. package/src/llama.cpp/examples/gbnf-validator/gbnf-validator.cpp +30 -25
  77. package/src/llama.cpp/examples/gguf/CMakeLists.txt +1 -1
  78. package/src/llama.cpp/examples/gguf/gguf.cpp +5 -0
  79. package/src/llama.cpp/examples/gguf-hash/CMakeLists.txt +15 -0
  80. package/src/llama.cpp/examples/gguf-hash/deps/rotate-bits/rotate-bits.h +46 -0
  81. package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.c +295 -0
  82. package/src/llama.cpp/examples/gguf-hash/deps/sha1/sha1.h +52 -0
  83. package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.c +221 -0
  84. package/src/llama.cpp/examples/gguf-hash/deps/sha256/sha256.h +24 -0
  85. package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.c +42 -0
  86. package/src/llama.cpp/examples/gguf-hash/deps/xxhash/xxhash.h +7093 -0
  87. package/src/llama.cpp/examples/gguf-hash/gguf-hash.cpp +693 -0
  88. package/src/llama.cpp/examples/gguf-split/CMakeLists.txt +1 -1
  89. package/src/llama.cpp/examples/gguf-split/gguf-split.cpp +3 -3
  90. package/src/llama.cpp/examples/gritlm/CMakeLists.txt +1 -1
  91. package/src/llama.cpp/examples/gritlm/gritlm.cpp +6 -2
  92. package/src/llama.cpp/examples/imatrix/CMakeLists.txt +1 -1
  93. package/src/llama.cpp/examples/imatrix/imatrix.cpp +137 -176
  94. package/src/llama.cpp/examples/infill/CMakeLists.txt +1 -1
  95. package/src/llama.cpp/examples/infill/infill.cpp +38 -153
  96. package/src/llama.cpp/examples/llama-bench/llama-bench.cpp +175 -94
  97. package/src/llama.cpp/examples/llama.android/app/build.gradle.kts +65 -0
  98. package/src/llama.cpp/examples/llama.android/build.gradle.kts +6 -0
  99. package/src/llama.cpp/examples/llama.android/llama/build.gradle.kts +68 -0
  100. package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/CMakeLists.txt +11 -7
  101. package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/llama-android.cpp +2 -2
  102. package/src/llama.cpp/examples/llama.android/settings.gradle.kts +18 -0
  103. package/src/llama.cpp/examples/llava/CMakeLists.txt +6 -5
  104. package/src/llama.cpp/examples/llava/android/build_64.sh +8 -0
  105. package/src/llama.cpp/examples/llava/clip.cpp +23 -14
  106. package/src/llama.cpp/examples/llava/llava-cli.cpp +8 -6
  107. package/src/llama.cpp/examples/llava/requirements.txt +3 -2
  108. package/src/llama.cpp/examples/lookahead/CMakeLists.txt +1 -1
  109. package/src/llama.cpp/examples/lookahead/lookahead.cpp +2 -1
  110. package/src/llama.cpp/examples/lookup/CMakeLists.txt +4 -4
  111. package/src/llama.cpp/examples/lookup/lookup-create.cpp +2 -0
  112. package/src/llama.cpp/examples/lookup/lookup-merge.cpp +4 -4
  113. package/src/llama.cpp/examples/lookup/lookup-stats.cpp +2 -2
  114. package/src/llama.cpp/examples/lookup/lookup.cpp +1 -1
  115. package/src/llama.cpp/examples/main/CMakeLists.txt +1 -1
  116. package/src/llama.cpp/examples/main/main.cpp +98 -75
  117. package/src/llama.cpp/examples/main-cmake-pkg/CMakeLists.txt +4 -5
  118. package/src/llama.cpp/examples/parallel/CMakeLists.txt +1 -1
  119. package/src/llama.cpp/examples/parallel/parallel.cpp +2 -1
  120. package/src/llama.cpp/examples/passkey/CMakeLists.txt +1 -1
  121. package/src/llama.cpp/examples/passkey/passkey.cpp +23 -43
  122. package/src/llama.cpp/examples/perplexity/CMakeLists.txt +1 -1
  123. package/src/llama.cpp/examples/perplexity/perplexity.cpp +13 -10
  124. package/src/llama.cpp/examples/quantize/CMakeLists.txt +1 -1
  125. package/src/llama.cpp/examples/quantize/quantize.cpp +37 -34
  126. package/src/llama.cpp/examples/quantize-stats/CMakeLists.txt +1 -1
  127. package/src/llama.cpp/examples/quantize-stats/quantize-stats.cpp +1 -1
  128. package/src/llama.cpp/examples/retrieval/CMakeLists.txt +1 -1
  129. package/src/llama.cpp/examples/retrieval/retrieval.cpp +26 -77
  130. package/src/llama.cpp/examples/save-load-state/CMakeLists.txt +1 -1
  131. package/src/llama.cpp/examples/save-load-state/save-load-state.cpp +14 -7
  132. package/src/llama.cpp/examples/server/CMakeLists.txt +26 -2
  133. package/src/llama.cpp/examples/server/server.cpp +274 -671
  134. package/src/llama.cpp/examples/server/tests/requirements.txt +2 -2
  135. package/src/llama.cpp/examples/server/utils.hpp +28 -29
  136. package/src/llama.cpp/examples/simple/CMakeLists.txt +1 -1
  137. package/src/llama.cpp/examples/simple/simple.cpp +21 -29
  138. package/src/llama.cpp/examples/speculative/CMakeLists.txt +1 -1
  139. package/src/llama.cpp/examples/speculative/speculative.cpp +2 -1
  140. package/src/llama.cpp/examples/sycl/CMakeLists.txt +1 -1
  141. package/src/llama.cpp/examples/sycl/build.sh +23 -0
  142. package/src/llama.cpp/examples/sycl/run-llama2.sh +36 -0
  143. package/src/llama.cpp/examples/sycl/win-build-sycl.bat +33 -0
  144. package/src/llama.cpp/examples/sycl/win-run-llama2.bat +9 -0
  145. package/src/llama.cpp/examples/tokenize/CMakeLists.txt +1 -1
  146. package/src/llama.cpp/examples/tokenize/tokenize.cpp +16 -2
  147. package/src/llama.cpp/ggml/CMakeLists.txt +253 -0
  148. package/src/llama.cpp/{cmake → ggml/cmake}/FindSIMD.cmake +6 -6
  149. package/src/llama.cpp/{ggml-backend.h → ggml/include/ggml-backend.h} +22 -17
  150. package/src/llama.cpp/ggml/include/ggml-blas.h +23 -0
  151. package/src/llama.cpp/ggml/include/ggml-cann.h +125 -0
  152. package/src/llama.cpp/{ggml-cuda.h → ggml/include/ggml-cuda.h} +3 -0
  153. package/src/llama.cpp/{ggml-metal.h → ggml/include/ggml-metal.h} +1 -2
  154. package/src/llama.cpp/{ggml-sycl.h → ggml/include/ggml-sycl.h} +3 -10
  155. package/src/llama.cpp/{ggml.h → ggml/include/ggml.h} +80 -85
  156. package/src/llama.cpp/ggml/src/CMakeLists.txt +1329 -0
  157. package/src/llama.cpp/ggml/src/ggml-aarch64.c +2193 -0
  158. package/src/llama.cpp/ggml/src/ggml-aarch64.h +39 -0
  159. package/src/llama.cpp/{ggml-alloc.c → ggml/src/ggml-alloc.c} +100 -49
  160. package/src/llama.cpp/{ggml-backend-impl.h → ggml/src/ggml-backend-impl.h} +20 -8
  161. package/src/llama.cpp/{ggml-backend.c → ggml/src/ggml-backend.c} +307 -167
  162. package/src/llama.cpp/ggml/src/ggml-blas.cpp +367 -0
  163. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.cpp +198 -0
  164. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.h +230 -0
  165. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +2944 -0
  166. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.h +592 -0
  167. package/src/llama.cpp/ggml/src/ggml-cann/common.h +282 -0
  168. package/src/llama.cpp/ggml/src/ggml-cann/kernels/CMakeLists.txt +32 -0
  169. package/src/llama.cpp/ggml/src/ggml-cann/kernels/ascendc_kernels.h +17 -0
  170. package/src/llama.cpp/ggml/src/ggml-cann/kernels/dup.cpp +223 -0
  171. package/src/llama.cpp/ggml/src/ggml-cann/kernels/get_row_f16.cpp +186 -0
  172. package/src/llama.cpp/ggml/src/ggml-cann/kernels/get_row_f32.cpp +180 -0
  173. package/src/llama.cpp/ggml/src/ggml-cann/kernels/get_row_q4_0.cpp +193 -0
  174. package/src/llama.cpp/ggml/src/ggml-cann/kernels/get_row_q8_0.cpp +191 -0
  175. package/src/llama.cpp/ggml/src/ggml-cann/kernels/quantize_f16_q8_0.cpp +208 -0
  176. package/src/llama.cpp/ggml/src/ggml-cann/kernels/quantize_f32_q8_0.cpp +206 -0
  177. package/src/llama.cpp/ggml/src/ggml-cann.cpp +2023 -0
  178. package/src/llama.cpp/{ggml-common.h → ggml/src/ggml-common.h} +41 -7
  179. package/src/llama.cpp/{ggml-impl.h → ggml/src/ggml-impl.h} +113 -9
  180. package/src/llama.cpp/{ggml-kompute.cpp → ggml/src/ggml-kompute.cpp} +33 -18
  181. package/src/llama.cpp/{ggml-quants.c → ggml/src/ggml-quants.c} +1460 -940
  182. package/src/llama.cpp/{ggml-quants.h → ggml/src/ggml-quants.h} +19 -20
  183. package/src/llama.cpp/{ggml-rpc.cpp → ggml/src/ggml-rpc.cpp} +95 -72
  184. package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +27 -0
  185. package/src/llama.cpp/ggml/src/ggml-sycl/common.cpp +53 -0
  186. package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +355 -0
  187. package/src/llama.cpp/ggml/src/ggml-sycl/concat.cpp +195 -0
  188. package/src/llama.cpp/ggml/src/ggml-sycl/concat.hpp +21 -0
  189. package/src/llama.cpp/ggml/src/ggml-sycl/convert.cpp +547 -0
  190. package/src/llama.cpp/ggml/src/ggml-sycl/convert.hpp +27 -0
  191. package/src/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp +698 -0
  192. package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +1023 -0
  193. package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.hpp +27 -0
  194. package/src/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +3011 -0
  195. package/src/llama.cpp/ggml/src/ggml-sycl/mmq.cpp +3031 -0
  196. package/src/llama.cpp/ggml/src/ggml-sycl/mmq.hpp +33 -0
  197. package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +1027 -0
  198. package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.hpp +27 -0
  199. package/src/llama.cpp/ggml/src/ggml-sycl/norm.cpp +374 -0
  200. package/src/llama.cpp/ggml/src/ggml-sycl/norm.hpp +35 -0
  201. package/src/llama.cpp/ggml/src/ggml-sycl/presets.hpp +66 -0
  202. package/src/llama.cpp/ggml/src/ggml-sycl/rope.cpp +275 -0
  203. package/src/llama.cpp/ggml/src/ggml-sycl/rope.hpp +22 -0
  204. package/src/llama.cpp/ggml/src/ggml-sycl/softmax.cpp +251 -0
  205. package/src/llama.cpp/ggml/src/ggml-sycl/softmax.hpp +24 -0
  206. package/src/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +1140 -0
  207. package/src/llama.cpp/ggml/src/ggml-sycl.cpp +5314 -0
  208. package/src/llama.cpp/{ggml-vulkan.cpp → ggml/src/ggml-vulkan.cpp} +1781 -1868
  209. package/src/llama.cpp/{ggml.c → ggml/src/ggml.c} +1245 -2087
  210. package/src/llama.cpp/{sgemm.cpp → ggml/src/llamafile/sgemm.cpp} +21 -24
  211. package/src/llama.cpp/{sgemm.h → ggml/src/llamafile/sgemm.h} +1 -1
  212. package/src/llama.cpp/ggml/src/vulkan-shaders/CMakeLists.txt +5 -0
  213. package/src/llama.cpp/ggml/src/vulkan-shaders/vulkan-shaders-gen.cpp +552 -0
  214. package/src/llama.cpp/{llama.h → include/llama.h} +175 -100
  215. package/src/llama.cpp/models/.editorconfig +1 -0
  216. package/src/llama.cpp/models/ggml-vocab-aquila.gguf +0 -0
  217. package/src/llama.cpp/models/ggml-vocab-baichuan.gguf +0 -0
  218. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf +0 -0
  219. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.inp +112 -0
  220. package/src/llama.cpp/models/ggml-vocab-bert-bge.gguf.out +46 -0
  221. package/src/llama.cpp/models/ggml-vocab-command-r.gguf +0 -0
  222. package/src/llama.cpp/models/ggml-vocab-command-r.gguf.inp +112 -0
  223. package/src/llama.cpp/models/ggml-vocab-command-r.gguf.out +46 -0
  224. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf +0 -0
  225. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.inp +112 -0
  226. package/src/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.out +46 -0
  227. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf +0 -0
  228. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.inp +112 -0
  229. package/src/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.out +46 -0
  230. package/src/llama.cpp/models/ggml-vocab-falcon.gguf +0 -0
  231. package/src/llama.cpp/models/ggml-vocab-falcon.gguf.inp +112 -0
  232. package/src/llama.cpp/models/ggml-vocab-falcon.gguf.out +46 -0
  233. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf +0 -0
  234. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.inp +112 -0
  235. package/src/llama.cpp/models/ggml-vocab-gpt-2.gguf.out +46 -0
  236. package/src/llama.cpp/models/ggml-vocab-gpt-neox.gguf +0 -0
  237. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf +0 -0
  238. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.inp +112 -0
  239. package/src/llama.cpp/models/ggml-vocab-llama-bpe.gguf.out +46 -0
  240. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf +0 -0
  241. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.inp +112 -0
  242. package/src/llama.cpp/models/ggml-vocab-llama-spm.gguf.out +46 -0
  243. package/src/llama.cpp/models/ggml-vocab-mpt.gguf +0 -0
  244. package/src/llama.cpp/models/ggml-vocab-mpt.gguf.inp +112 -0
  245. package/src/llama.cpp/models/ggml-vocab-mpt.gguf.out +46 -0
  246. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf +0 -0
  247. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.inp +112 -0
  248. package/src/llama.cpp/models/ggml-vocab-phi-3.gguf.out +46 -0
  249. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf +0 -0
  250. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.inp +112 -0
  251. package/src/llama.cpp/models/ggml-vocab-qwen2.gguf.out +46 -0
  252. package/src/llama.cpp/models/ggml-vocab-refact.gguf +0 -0
  253. package/src/llama.cpp/models/ggml-vocab-refact.gguf.inp +112 -0
  254. package/src/llama.cpp/models/ggml-vocab-refact.gguf.out +46 -0
  255. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf +0 -0
  256. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.inp +112 -0
  257. package/src/llama.cpp/models/ggml-vocab-starcoder.gguf.out +46 -0
  258. package/src/llama.cpp/pocs/vdot/CMakeLists.txt +2 -2
  259. package/src/llama.cpp/requirements/requirements-all.txt +12 -0
  260. package/src/llama.cpp/requirements/requirements-compare-llama-bench.txt +2 -0
  261. package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf.txt +3 -0
  262. package/src/llama.cpp/requirements/requirements-convert_hf_to_gguf_update.txt +3 -0
  263. package/src/llama.cpp/requirements/{requirements-convert.txt → requirements-convert_legacy_llama.txt} +1 -1
  264. package/src/llama.cpp/requirements/requirements-convert_llama_ggml_to_gguf.txt +1 -0
  265. package/src/llama.cpp/requirements/requirements-convert_lora_to_gguf.txt +2 -0
  266. package/src/llama.cpp/requirements/requirements-pydantic.txt +3 -0
  267. package/src/llama.cpp/requirements/requirements-test-tokenizer-random.txt +1 -0
  268. package/src/llama.cpp/requirements.txt +5 -4
  269. package/src/llama.cpp/scripts/build-info.sh +30 -0
  270. package/src/llama.cpp/scripts/install-oneapi.bat +19 -0
  271. package/src/llama.cpp/src/CMakeLists.txt +33 -0
  272. package/src/llama.cpp/src/llama-grammar.cpp +539 -0
  273. package/src/llama.cpp/src/llama-grammar.h +39 -0
  274. package/src/llama.cpp/src/llama-impl.h +26 -0
  275. package/src/llama.cpp/src/llama-sampling.cpp +635 -0
  276. package/src/llama.cpp/src/llama-sampling.h +56 -0
  277. package/src/llama.cpp/src/llama-vocab.cpp +1721 -0
  278. package/src/llama.cpp/src/llama-vocab.h +130 -0
  279. package/src/llama.cpp/{llama.cpp → src/llama.cpp} +5979 -5260
  280. package/src/llama.cpp/{unicode-data.cpp → src/unicode-data.cpp} +851 -802
  281. package/src/llama.cpp/{unicode.cpp → src/unicode.cpp} +52 -30
  282. package/src/llama.cpp/{unicode.h → src/unicode.h} +5 -1
  283. package/src/llama.cpp/tests/CMakeLists.txt +19 -20
  284. package/src/llama.cpp/tests/test-backend-ops.cpp +245 -67
  285. package/src/llama.cpp/tests/test-chat-template.cpp +57 -3
  286. package/src/llama.cpp/tests/test-double-float.cpp +2 -2
  287. package/src/llama.cpp/tests/test-grad0.cpp +2 -2
  288. package/src/llama.cpp/tests/test-grammar-integration.cpp +978 -31
  289. package/src/llama.cpp/tests/test-grammar-parser.cpp +423 -158
  290. package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +508 -135
  291. package/src/llama.cpp/tests/test-llama-grammar.cpp +15 -9
  292. package/src/llama.cpp/tests/test-quantize-fns.cpp +1 -1
  293. package/src/llama.cpp/tests/test-quantize-perf.cpp +1 -1
  294. package/src/llama.cpp/tests/test-rope.cpp +3 -4
  295. package/src/llama.cpp/tests/test-sampling.cpp +5 -5
  296. package/src/llama.cpp/tests/test-tokenizer-0.cpp +6 -6
  297. package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp +20 -15
  298. package/src/llama.cpp/tests/test-tokenizer-1-spm.cpp +22 -11
  299. package/bin/darwin/arm64/default.metallib +0 -0
  300. package/bin/darwin/x64/default.metallib +0 -0
  301. package/src/llama.cpp/examples/beam-search/CMakeLists.txt +0 -5
  302. package/src/llama.cpp/examples/beam-search/beam-search.cpp +0 -188
  303. package/src/llama.cpp/examples/finetune/finetune.cpp +0 -1862
  304. package/src/llama.cpp/examples/llama.android/llama/CMakeLists.txt +0 -55
  305. package/src/llama.cpp/examples/train-text-from-scratch/CMakeLists.txt +0 -5
  306. package/src/llama.cpp/examples/train-text-from-scratch/train-text-from-scratch.cpp +0 -1253
  307. package/src/llama.cpp/ggml-opencl.cpp +0 -2305
  308. package/src/llama.cpp/ggml-opencl.h +0 -36
  309. package/src/llama.cpp/ggml-sycl.cpp +0 -17340
  310. package/src/llama.cpp/ggml-vulkan-shaders.hpp +0 -81211
  311. package/src/llama.cpp/requirements/requirements-convert-hf-to-gguf-update.txt +0 -2
  312. package/src/llama.cpp/requirements/requirements-convert-hf-to-gguf.txt +0 -2
  313. package/src/llama.cpp/requirements/requirements-convert-llama-ggml-to-gguf.txt +0 -1
  314. package/src/llama.cpp/scripts/gen-build-info-cpp.cmake +0 -24
  315. /package/src/llama.cpp/{ggml-alloc.h → ggml/include/ggml-alloc.h} +0 -0
  316. /package/src/llama.cpp/{ggml-kompute.h → ggml/include/ggml-kompute.h} +0 -0
  317. /package/src/llama.cpp/{ggml-rpc.h → ggml/include/ggml-rpc.h} +0 -0
  318. /package/src/llama.cpp/{ggml-vulkan.h → ggml/include/ggml-vulkan.h} +0 -0
  319. /package/src/llama.cpp/{unicode-data.h → src/unicode-data.h} +0 -0
package/src/llama.cpp/.github/workflows/close-issue.yml
@@ -0,0 +1,23 @@
+ name: Close inactive issues
+ on:
+   schedule:
+     - cron: "42 0 * * *"
+
+ jobs:
+   close-issues:
+     runs-on: ubuntu-latest
+     permissions:
+       issues: write
+       pull-requests: write
+     steps:
+       - uses: actions/stale@v5
+         with:
+           exempt-issue-labels: "refactor,help wanted,good first issue,research,bug"
+           days-before-issue-stale: 30
+           days-before-issue-close: 14
+           stale-issue-label: "stale"
+           close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale."
+           days-before-pr-stale: -1
+           days-before-pr-close: -1
+           operations-per-run: 10000
+           repo-token: ${{ secrets.GITHUB_TOKEN }}
package/src/llama.cpp/.github/workflows/docker.yml
@@ -0,0 +1,116 @@
+ # This workflow uses actions that are not certified by GitHub.
+ # They are provided by a third-party and are governed by
+ # separate terms of service, privacy policy, and support
+ # documentation.
+
+ # GitHub recommends pinning actions to a commit SHA.
+ # To get a newer version, you will need to update the SHA.
+ # You can also reference a tag or branch, but the action may change without warning.
+
+ name: Publish Docker image
+
+ on:
+   #pull_request:
+   push:
+     branches:
+       - master
+     paths: ['.github/workflows/docker.yml', '.devops/*.Dockerfile', '**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.cuh', '**/*.swift', '**/*.m', '**/*.metal']
+
+ concurrency:
+   group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
+   cancel-in-progress: true
+
+ jobs:
+   push_to_registry:
+     name: Push Docker image to Docker Hub
+     #if: github.event.pull_request.draft == false
+
+     runs-on: ubuntu-latest
+     env:
+       COMMIT_SHA: ${{ github.sha }}
+     strategy:
+       matrix:
+         config:
+           - { tag: "light", dockerfile: ".devops/llama-cli.Dockerfile", platforms: "linux/amd64,linux/arm64" }
+           - { tag: "server", dockerfile: ".devops/llama-server.Dockerfile", platforms: "linux/amd64,linux/arm64" }
+           - { tag: "full", dockerfile: ".devops/full.Dockerfile", platforms: "linux/amd64,linux/arm64" }
+           - { tag: "light-cuda", dockerfile: ".devops/llama-cli-cuda.Dockerfile", platforms: "linux/amd64" }
+           - { tag: "server-cuda", dockerfile: ".devops/llama-server-cuda.Dockerfile", platforms: "linux/amd64" }
+           - { tag: "full-cuda", dockerfile: ".devops/full-cuda.Dockerfile", platforms: "linux/amd64" }
+           - { tag: "light-rocm", dockerfile: ".devops/llama-cli-rocm.Dockerfile", platforms: "linux/amd64,linux/arm64" }
+           - { tag: "server-rocm", dockerfile: ".devops/llama-server-rocm.Dockerfile", platforms: "linux/amd64,linux/arm64" }
+           # Note: the full-rocm image is failing due to a "no space left on device" error. It is disabled for now to allow the workflow to complete.
+           #- { tag: "full-rocm", dockerfile: ".devops/full-rocm.Dockerfile", platforms: "linux/amd64,linux/arm64" }
+           - { tag: "light-intel", dockerfile: ".devops/llama-cli-intel.Dockerfile", platforms: "linux/amd64" }
+           - { tag: "server-intel", dockerfile: ".devops/llama-server-intel.Dockerfile", platforms: "linux/amd64" }
+     steps:
+       - name: Check out the repo
+         uses: actions/checkout@v4
+
+       - name: Set up QEMU
+         uses: docker/setup-qemu-action@v2
+
+       - name: Set up Docker Buildx
+         uses: docker/setup-buildx-action@v2
+
+       - name: Log in to Docker Hub
+         uses: docker/login-action@v2
+         with:
+           registry: ghcr.io
+           username: ${{ github.repository_owner }}
+           password: ${{ secrets.GITHUB_TOKEN }}
+
+       # https://github.com/jlumbroso/free-disk-space/tree/54081f138730dfa15788a46383842cd2f914a1be#example
+       - name: Free Disk Space (Ubuntu)
+         uses: jlumbroso/free-disk-space@main
+         with:
+           # this might remove tools that are actually needed,
+           # if set to "true" but frees about 6 GB
+           tool-cache: false
+
+           # all of these default to true, but feel free to set to
+           # "false" if necessary for your workflow
+           android: true
+           dotnet: true
+           haskell: true
+           large-packages: true
+           docker-images: true
+           swap-storage: true
+
+       - name: Determine tag name
+         id: tag
+         shell: bash
+         run: |
+           BUILD_NUMBER="$(git rev-list --count HEAD)"
+           SHORT_HASH="$(git rev-parse --short=7 HEAD)"
+           if [[ "${{ env.BRANCH_NAME }}" == "master" ]]; then
+             echo "name=b${BUILD_NUMBER}" >> $GITHUB_OUTPUT
+           else
+             SAFE_NAME=$(echo "${{ env.BRANCH_NAME }}" | tr '/' '-')
+             echo "name=${SAFE_NAME}-b${BUILD_NUMBER}-${SHORT_HASH}" >> $GITHUB_OUTPUT
+           fi
+
+       - name: Downcase github.repository_owner
+         run: |
+           echo "repository_owner_lowercase=${GITHUB_REPOSITORY_OWNER@L}" >> $GITHUB_ENV
+         env:
+           GITHUB_REPOSITORY_OWNER: '${{ github.repository_owner }}'
+
+       - name: Build and push Docker image (versioned)
+         if: github.event_name == 'push'
+         uses: docker/build-push-action@v4
+         with:
+           context: .
+           push: true
+           platforms: ${{ matrix.config.platforms }}
+           tags: "ghcr.io/${{ env.repository_owner_lowercase }}/llama.cpp:${{ matrix.config.tag }}-${{ env.COMMIT_SHA }}"
+           file: ${{ matrix.config.dockerfile }}
+
+       - name: Build and push Docker image (tagged)
+         uses: docker/build-push-action@v4
+         with:
+           context: .
+           push: ${{ github.event_name == 'push' }}
+           platforms: ${{ matrix.config.platforms }}
+           tags: "ghcr.io/${{ env.repository_owner_lowercase }}/llama.cpp:${{ matrix.config.tag }},ghcr.io/${{ env.repository_owner_lowercase }}/llama.cpp:${{ matrix.config.tag }}-${{ steps.tag.outputs.name }}"
+           file: ${{ matrix.config.dockerfile }}
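The matrix above publishes each image variant to GitHub Container Registry under the lower-cased repository owner, once suffixed with the commit SHA and once with the build-number tag computed in the "Determine tag name" step. As a rough sketch of what this yields for consumers, assuming the upstream ggerganov owner and a completed master build (exact suffixes depend on the build number and commit):

    # hypothetical pulls; tag names follow the matrix entries above
    docker pull ghcr.io/ggerganov/llama.cpp:server
    docker pull ghcr.io/ggerganov/llama.cpp:light-cuda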
package/src/llama.cpp/.github/workflows/editorconfig.yml
@@ -0,0 +1,27 @@
+ name: EditorConfig Checker
+
+ on:
+   workflow_dispatch: # allows manual triggering
+     inputs:
+       create_release:
+         description: 'Create new release'
+         required: true
+         type: boolean
+   push:
+     branches:
+       - master
+   pull_request:
+     branches:
+       - master
+
+ concurrency:
+   group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
+   cancel-in-progress: true
+
+ jobs:
+   editorconfig:
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v4
+       - uses: editorconfig-checker/action-editorconfig-checker@main
+       - run: editorconfig-checker
package/src/llama.cpp/.github/workflows/gguf-publish.yml
@@ -0,0 +1,44 @@
+ # This workflow will upload a Python Package using Twine when a GGUF release is created
+ # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
+
+ # See `gguf-py/README.md` for how to make a release.
+
+ # This workflow uses actions that are not certified by GitHub.
+ # They are provided by a third-party and are governed by
+ # separate terms of service, privacy policy, and support
+ # documentation.
+
+ name: Upload Python Package
+
+ on:
+   workflow_dispatch:
+   push:
+     # Pattern matched against refs/tags
+     tags:
+       - 'gguf-v*' # Push events to every version tag
+
+
+ jobs:
+   deploy:
+
+     runs-on: ubuntu-latest
+
+     steps:
+       - uses: actions/checkout@v4
+       - name: Set up Python
+         uses: actions/setup-python@v5
+         with:
+           python-version: '3.9.x'
+       - name: Install dependencies
+         run: |
+           cd gguf-py
+           python -m pip install poetry
+           poetry install
+
+       - name: Build package
+         run: cd gguf-py && poetry build
+       - name: Publish package
+         uses: pypa/gh-action-pypi-publish@release/v1
+         with:
+           password: ${{ secrets.PYPI_API_TOKEN }}
+           packages-dir: gguf-py/dist
package/src/llama.cpp/.github/workflows/labeler.yml
@@ -0,0 +1,17 @@
+ name: "Pull Request Labeler"
+ on:
+ - pull_request_target
+
+ jobs:
+   labeler:
+     permissions:
+       contents: read
+       pull-requests: write
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v4
+         with:
+           repository: "ggerganov/llama.cpp"
+       - uses: actions/labeler@v5
+         with:
+           configuration-path: '.github/labeler.yml'
package/src/llama.cpp/.github/workflows/nix-ci-aarch64.yml
@@ -0,0 +1,65 @@
+ name: Nix aarch64 builds
+
+ on:
+   workflow_dispatch: # allows manual triggering
+   schedule:
+     # Rebuild daily rather than on every push because QEMU is expensive (e.g.
+     # 1.5h instead of minutes with the cold cache).
+     #
+     # randint(0, 59), randint(0, 23)
+     - cron: '26 12 * * *'
+   # But also rebuild if we touched any of the Nix expressions:
+   push:
+     branches:
+       - master
+     paths: ['**/*.nix', 'flake.lock']
+   pull_request:
+     types: [opened, synchronize, reopened]
+     paths: ['**/*.nix', 'flake.lock']
+
+ concurrency:
+   group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
+   cancel-in-progress: true
+
+ jobs:
+   nix-build-aarch64:
+     runs-on: ubuntu-latest
+     steps:
+       - name: Checkout repository
+         uses: actions/checkout@v4
+       - name: Install QEMU
+         # Copy-paste from https://github.com/orgs/community/discussions/8305#discussioncomment-5888654
+         run: |
+           sudo apt-get update
+           sudo apt-get install -y qemu-user-static qemu-system-aarch64
+           sudo usermod -a -G kvm $USER
+       - name: Install Nix
+         uses: DeterminateSystems/nix-installer-action@v9
+         with:
+           github-token: ${{ secrets.GITHUB_TOKEN }}
+           extra-conf: |
+             extra-platforms = aarch64-linux
+             extra-system-features = nixos-test kvm
+             extra-substituters = https://llama-cpp.cachix.org https://cuda-maintainers.cachix.org
+             extra-trusted-public-keys = llama-cpp.cachix.org-1:H75X+w83wUKTIPSO1KWy9ADUrzThyGs8P5tmAbkWhQc= cuda-maintainers.cachix.org-1:0dq3bujKpuEPMCX6U4WylrUDZ9JyUG0VpVZa7CNfq5E=
+       - uses: DeterminateSystems/magic-nix-cache-action@v2
+         with:
+           upstream-cache: https://${{ matrix.cachixName }}.cachix.org
+       - name: Set-up cachix to push the results to
+         uses: cachix/cachix-action@v13
+         with:
+           authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
+           name: llama-cpp
+       - name: Show all output paths
+         run: >
+           nix run github:nix-community/nix-eval-jobs
+           -- --gc-roots-dir gcroot
+           --flake
+           ".#packages.aarch64-linux"
+       - name: Build
+         run: >
+           nix run github:Mic92/nix-fast-build
+           -- --skip-cached --no-nom
+           --systems aarch64-linux
+           --flake
+           ".#checks.aarch64-linux"
package/src/llama.cpp/.github/workflows/nix-ci.yml
@@ -0,0 +1,72 @@
+ name: Nix CI
+
+ on:
+   workflow_dispatch: # allows manual triggering
+   push:
+     branches:
+       - master
+   pull_request:
+     types: [opened, synchronize, reopened]
+
+ concurrency:
+   group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
+   cancel-in-progress: true
+
+ jobs:
+   nix-eval:
+     strategy:
+       fail-fast: false
+       matrix:
+         os: [ ubuntu-latest, macos-latest ]
+     runs-on: ${{ matrix.os }}
+     steps:
+       - name: Checkout repository
+         uses: actions/checkout@v4
+       - name: Install Nix
+         uses: DeterminateSystems/nix-installer-action@v9
+         with:
+           github-token: ${{ secrets.GITHUB_TOKEN }}
+           extra-conf: |
+             extra-substituters = https://llama-cpp.cachix.org https://cuda-maintainers.cachix.org
+             extra-trusted-public-keys = llama-cpp.cachix.org-1:H75X+w83wUKTIPSO1KWy9ADUrzThyGs8P5tmAbkWhQc= cuda-maintainers.cachix.org-1:0dq3bujKpuEPMCX6U4WylrUDZ9JyUG0VpVZa7CNfq5E=
+       - uses: DeterminateSystems/magic-nix-cache-action@v2
+         with:
+           upstream-cache: https://${{ matrix.cachixName }}.cachix.org
+       - name: List all flake outputs
+         run: nix flake show --all-systems
+       - name: Show all output paths
+         run: >
+           nix run github:nix-community/nix-eval-jobs
+           -- --gc-roots-dir gcroot
+           --flake
+           ".#packages.$(nix eval --raw --impure --expr builtins.currentSystem)"
+   nix-build:
+     strategy:
+       fail-fast: false
+       matrix:
+         os: [ ubuntu-latest, macos-latest ]
+     runs-on: ${{ matrix.os }}
+     steps:
+       - name: Checkout repository
+         uses: actions/checkout@v4
+       - name: Install Nix
+         uses: DeterminateSystems/nix-installer-action@v9
+         with:
+           github-token: ${{ secrets.GITHUB_TOKEN }}
+           extra-conf: |
+             extra-substituters = https://llama-cpp.cachix.org https://cuda-maintainers.cachix.org
+             extra-trusted-public-keys = llama-cpp.cachix.org-1:H75X+w83wUKTIPSO1KWy9ADUrzThyGs8P5tmAbkWhQc= cuda-maintainers.cachix.org-1:0dq3bujKpuEPMCX6U4WylrUDZ9JyUG0VpVZa7CNfq5E=
+       - uses: DeterminateSystems/magic-nix-cache-action@v2
+         with:
+           upstream-cache: https://${{ matrix.cachixName }}.cachix.org
+       - name: Set-up cachix to push the results to
+         uses: cachix/cachix-action@v13
+         with:
+           authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
+           name: llama-cpp
+       - name: Build
+         run: >
+           nix run github:Mic92/nix-fast-build
+           -- --skip-cached --no-nom
+           --flake
+           ".#checks.$(nix eval --raw --impure --expr builtins.currentSystem)"
package/src/llama.cpp/.github/workflows/nix-flake-update.yml
@@ -0,0 +1,22 @@
+ name: update-flake-lock
+ on:
+   workflow_dispatch:
+   schedule:
+     - cron: '0 0 * * 0' # runs weekly on Sunday at 00:00
+
+ jobs:
+   lockfile:
+     runs-on: ubuntu-latest
+     steps:
+       - name: Checkout repository
+         uses: actions/checkout@v4
+       - name: Install Nix
+         uses: DeterminateSystems/nix-installer-action@main
+       - name: Update flake.lock
+         uses: DeterminateSystems/update-flake-lock@main
+         with:
+           pr-title: "nix: update flake.lock"
+           pr-labels: |
+             nix
+           pr-reviewers: philiptaron,SomeoneSerge
+           token: ${{ secrets.FLAKE_TOKEN }}
package/src/llama.cpp/.github/workflows/nix-publish-flake.yml
@@ -0,0 +1,36 @@
+ # Make the flake discoverable on https://flakestry.dev and https://flakehub.com/flakes
+ name: "Publish a flake to flakestry & flakehub"
+ on:
+   push:
+     tags:
+       - "*"
+   workflow_dispatch:
+     inputs:
+       tag:
+         description: "The existing tag to publish"
+         type: "string"
+         required: true
+ jobs:
+   flakestry-publish:
+     runs-on: ubuntu-latest
+     permissions:
+       id-token: "write"
+       contents: "read"
+     steps:
+       - uses: flakestry/flakestry-publish@main
+         with:
+           version: "${{ inputs.tag || github.ref_name }}"
+   flakehub-publish:
+     runs-on: "ubuntu-latest"
+     permissions:
+       id-token: "write"
+       contents: "read"
+     steps:
+       - uses: "actions/checkout@v4"
+         with:
+           ref: "${{ (inputs.tag != null) && format('refs/tags/{0}', inputs.tag) || '' }}"
+       - uses: "DeterminateSystems/nix-installer-action@main"
+       - uses: "DeterminateSystems/flakehub-push@main"
+         with:
+           visibility: "public"
+           tag: "${{ inputs.tag }}"
package/src/llama.cpp/.github/workflows/python-check-requirements.yml
@@ -0,0 +1,35 @@
+ name: Python check requirements.txt
+
+ on:
+   push:
+     paths:
+       - '.github/workflows/python-check-requirements.yml'
+       - 'scripts/check-requirements.sh'
+       - 'convert*.py'
+       - 'requirements.txt'
+       - 'requirements/*.txt'
+   pull_request:
+     paths:
+       - '.github/workflows/python-check-requirements.yml'
+       - 'scripts/check-requirements.sh'
+       - 'convert*.py'
+       - 'requirements.txt'
+       - 'requirements/*.txt'
+
+ concurrency:
+   group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
+   cancel-in-progress: true
+
+ jobs:
+   python-check-requirements:
+     runs-on: ubuntu-latest
+     name: check-requirements
+     steps:
+       - name: Check out source repository
+         uses: actions/checkout@v4
+       - name: Set up Python environment
+         uses: actions/setup-python@v5
+         with:
+           python-version: "3.11"
+       - name: Run check-requirements.sh script
+         run: bash scripts/check-requirements.sh
package/src/llama.cpp/.github/workflows/python-lint.yml
@@ -0,0 +1,23 @@
+ name: flake8 Lint
+
+ on: [push, pull_request]
+
+ concurrency:
+   group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
+   cancel-in-progress: true
+
+ jobs:
+   flake8-lint:
+     runs-on: ubuntu-latest
+     name: Lint
+     steps:
+       - name: Check out source repository
+         uses: actions/checkout@v4
+       - name: Set up Python environment
+         uses: actions/setup-python@v5
+         with:
+           python-version: "3.11"
+       - name: flake8 Lint
+         uses: py-actions/flake8@v2
+         with:
+           plugins: "flake8-no-print"
package/src/llama.cpp/.github/workflows/python-type-check.yml
@@ -0,0 +1,38 @@
+ name: Python Type-Check
+
+ on:
+   push:
+     paths:
+       - '.github/workflows/python-type-check.yml'
+       - '**.py'
+       - '**/requirements*.txt'
+   pull_request:
+     paths:
+       - '.github/workflows/python-type-check.yml'
+       - '**.py'
+       - '**/requirements*.txt'
+
+ concurrency:
+   group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
+   cancel-in-progress: true
+
+ jobs:
+   python-type-check:
+     runs-on: ubuntu-latest
+     name: pyright type-check
+     steps:
+       - name: Check out source repository
+         uses: actions/checkout@v4
+       - name: Set up Python environment
+         uses: actions/setup-python@v5
+         with:
+           python-version: "3.11"
+       - name: Install Python dependencies
+         # TODO: use a venv
+         run: pip install -r requirements/requirements-all.txt
+       - name: Type-check with Pyright
+         uses: jakebailey/pyright-action@v2
+         with:
+           version: 1.1.370
+           level: warning
+           warnings: true
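For reference, this check can be approximated outside CI; a minimal local sketch assuming a Python 3.11 environment and the PyPI pyright wrapper (the workflow pins the pyright-action, not the CLI, so the exact invocation may differ):

    # hypothetical local equivalent of the pyright step above
    pip install -r requirements/requirements-all.txt
    pip install pyright==1.1.370
    pyright --level warning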