@shipworthy/ai-sdk-llama-cpp 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/CMakeLists.txt +6 -0
  2. package/LICENSE +21 -0
  3. package/README.md +274 -0
  4. package/dist/binding-bun.d.ts +7 -0
  5. package/dist/binding-bun.d.ts.map +1 -0
  6. package/dist/binding-bun.js +354 -0
  7. package/dist/binding-bun.js.map +1 -0
  8. package/dist/binding-node.d.ts +7 -0
  9. package/dist/binding-node.d.ts.map +1 -0
  10. package/dist/binding-node.js +59 -0
  11. package/dist/binding-node.js.map +1 -0
  12. package/dist/binding.d.ts +67 -0
  13. package/dist/binding.d.ts.map +1 -0
  14. package/dist/binding.js +105 -0
  15. package/dist/binding.js.map +1 -0
  16. package/dist/index.d.ts +5 -0
  17. package/dist/index.d.ts.map +1 -0
  18. package/dist/index.js +8 -0
  19. package/dist/index.js.map +1 -0
  20. package/dist/llama-cpp-embedding-model.d.ts +28 -0
  21. package/dist/llama-cpp-embedding-model.d.ts.map +1 -0
  22. package/dist/llama-cpp-embedding-model.js +78 -0
  23. package/dist/llama-cpp-embedding-model.js.map +1 -0
  24. package/dist/llama-cpp-language-model.d.ts +55 -0
  25. package/dist/llama-cpp-language-model.d.ts.map +1 -0
  26. package/dist/llama-cpp-language-model.js +221 -0
  27. package/dist/llama-cpp-language-model.js.map +1 -0
  28. package/dist/llama-cpp-provider.d.ts +82 -0
  29. package/dist/llama-cpp-provider.d.ts.map +1 -0
  30. package/dist/llama-cpp-provider.js +71 -0
  31. package/dist/llama-cpp-provider.js.map +1 -0
  32. package/dist/native-binding.d.ts +51 -0
  33. package/dist/native-binding.d.ts.map +1 -0
  34. package/dist/native-binding.js +74 -0
  35. package/dist/native-binding.js.map +1 -0
  36. package/native/CMakeLists.txt +74 -0
  37. package/native/binding.cpp +522 -0
  38. package/native/llama-wrapper.cpp +519 -0
  39. package/native/llama-wrapper.h +131 -0
  40. package/package.json +79 -0
  41. package/scripts/postinstall.cjs +74 -0
package/package.json ADDED
@@ -0,0 +1,79 @@
1
+ {
2
+ "name": "@shipworthy/ai-sdk-llama-cpp",
3
+ "version": "0.2.0",
4
+ "description": "A minimal llama.cpp provider for the Vercel AI SDK implementing LanguageModelV3 and EmbeddingModelV3",
5
+ "type": "module",
6
+ "main": "./dist/index.js",
7
+ "types": "./dist/index.d.ts",
8
+ "exports": {
9
+ ".": {
10
+ "import": "./dist/index.js",
11
+ "types": "./dist/index.d.ts"
12
+ }
13
+ },
14
+ "files": [
15
+ "dist",
16
+ "native",
17
+ "scripts",
18
+ "CMakeLists.txt"
19
+ ],
20
+ "llamaCpp": {
21
+ "repo": "https://github.com/ggerganov/llama.cpp.git",
22
+ "commit": "06705fdcb3ef199d2c2c95a5e3cbb7cf9cc5256e"
23
+ },
24
+ "scripts": {
25
+ "postinstall": "node scripts/postinstall.cjs",
26
+ "build:native": "cmake-js compile",
27
+ "build:native:debug": "cmake-js compile --debug",
28
+ "build:ts": "tsc",
29
+ "build": "npm run build:native && npm run build:ts",
30
+ "clean": "rm -rf dist build",
31
+ "prepublishOnly": "npm run build:ts",
32
+ "test": "vitest",
33
+ "test:run": "vitest run",
34
+ "test:unit": "vitest run tests/unit",
35
+ "test:integration": "vitest run tests/integration",
36
+ "test:e2e": "vitest run tests/e2e",
37
+ "test:coverage": "vitest run --coverage"
38
+ },
39
+ "keywords": [
40
+ "ai",
41
+ "llama",
42
+ "llama.cpp",
43
+ "vercel",
44
+ "ai-sdk",
45
+ "language-model",
46
+ "gguf"
47
+ ],
48
+ "author": "Lars Grammel",
49
+ "license": "MIT",
50
+ "repository": {
51
+ "type": "git",
52
+ "url": "git+https://github.com/lgrammel/ai-sdk-llama-cpp.git"
53
+ },
54
+ "bugs": {
55
+ "url": "https://github.com/lgrammel/ai-sdk-llama-cpp/issues"
56
+ },
57
+ "homepage": "https://github.com/lgrammel/ai-sdk-llama-cpp#readme",
58
+ "engines": {
59
+ "node": ">=18.0.0"
60
+ },
61
+ "dependencies": {
62
+ "@ai-sdk/provider": "^3.0.0",
63
+ "cmake-js": "^7.3.0",
64
+ "node-addon-api": "^8.0.0"
65
+ },
66
+ "devDependencies": {
67
+ "@types/node": "^20.10.0",
68
+ "@vitest/coverage-v8": "^2.1.8",
69
+ "ai": "^6.0.3",
70
+ "tsx": "^4.21.0",
71
+ "typescript": "^5.3.0",
72
+ "vitest": "^2.1.8"
73
+ },
74
+ "binary": {
75
+ "napi_versions": [
76
+ 8
77
+ ]
78
+ }
79
+ }
// postinstall.cjs — fetch llama.cpp at the pinned commit and build the
// native bindings. macOS only. Exits non-zero with a readable message on
// any failure so `npm install` reports a clear cause.
const { execSync } = require("child_process");
const fs = require("fs");
const path = require("path");

// Root of the installed package (this script lives in <root>/scripts/).
// All git/build steps are anchored here so the script behaves the same
// regardless of the caller's working directory.
const packageRoot = path.join(__dirname, "..");

// Step 1: Check platform is macOS
if (process.platform !== "darwin") {
  console.error("\n===========================================");
  console.error("ERROR: ai-sdk-llama-cpp only supports macOS");
  console.error("===========================================\n");
  console.error(`Detected platform: ${process.platform}`);
  console.error("This package requires macOS for native compilation.\n");
  process.exit(1);
}

// Step 2: Check if git is available
try {
  execSync("git --version", { stdio: "ignore" });
} catch (error) {
  console.error("\n===========================================");
  console.error("ERROR: git is required but not found");
  console.error("===========================================\n");
  console.error("Please install git to continue.");
  console.error("  macOS: xcode-select --install");
  console.error("  or: brew install git\n");
  process.exit(1);
}

// Step 3: Check if llama.cpp already exists (for local development)
const llamaCppPath = path.join(packageRoot, "llama.cpp");
if (fs.existsSync(llamaCppPath)) {
  console.log("llama.cpp directory already exists, skipping clone...");
} else {
  // Step 4: Read the pinned repo/commit from package.json.
  // Guard with `?? {}` so a missing "llamaCpp" key reaches the explicit
  // error message below instead of throwing a raw TypeError on destructure.
  const packageJsonPath = path.join(packageRoot, "package.json");
  const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, "utf8"));
  const { repo, commit } = packageJson.llamaCpp ?? {};

  if (!repo || !commit) {
    console.error(
      "ERROR: llamaCpp.repo and llamaCpp.commit must be defined in package.json"
    );
    process.exit(1);
  }

  // Step 5: Clone llama.cpp at the specific commit
  console.log(`Cloning llama.cpp at commit ${commit}...`);
  try {
    // Clone with depth 1 for faster download, then fetch the specific
    // commit and check it out. (GitHub allows fetching an arbitrary SHA;
    // shallow fetches keep the download small.)
    execSync(`git clone --depth 1 ${repo} llama.cpp`, {
      stdio: "inherit",
      cwd: packageRoot,
    });

    execSync(`git fetch --depth 1 origin ${commit}`, {
      stdio: "inherit",
      cwd: llamaCppPath,
    });
    execSync(`git checkout ${commit}`, {
      stdio: "inherit",
      cwd: llamaCppPath,
    });

    console.log("llama.cpp cloned successfully.");
  } catch (error) {
    console.error("ERROR: Failed to clone llama.cpp");
    console.error(error.message);
    // Remove any partial checkout so a retry does not hit the
    // "already exists, skipping clone" branch with a broken tree.
    fs.rmSync(llamaCppPath, { recursive: true, force: true });
    process.exit(1);
  }
}

// Step 6: Build native bindings.
// Run from the package root (consistent with the clone steps) so the build
// works when the script is invoked from any directory, and report failures
// in the same style as the steps above instead of an unhandled exception.
console.log("Building native llama.cpp bindings for macOS...");
try {
  execSync("npx cmake-js compile", { stdio: "inherit", cwd: packageRoot });
} catch (error) {
  console.error("ERROR: Failed to build native bindings");
  console.error(error.message);
  process.exit(1);
}