@shipworthy/ai-sdk-llama-cpp 0.2.4 → 0.2.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/llama.cpp.bundle +0 -0
- package/package.json +5 -4
- package/scripts/postinstall.cjs +77 -55
package/llama.cpp.bundle
ADDED
Binary file
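llama.cpp.bundle is a git bundle: a single-file archive of a repository that git clone accepts in place of a remote URL, which is what lets the postinstall script below materialize the llama.cpp sources without a network fetch. A minimal sketch of that round trip, assuming git is on PATH and using illustrative paths:

// Sketch (not part of the package): cloning from a git bundle.
const { execSync } = require("child_process");

// `git bundle verify` checks the archive's integrity and prerequisites.
execSync("git bundle verify llama.cpp.bundle", { stdio: "inherit" });

// A bundle path can be cloned exactly like a remote URL.
execSync("git clone llama.cpp.bundle llama.cpp", { stdio: "inherit" });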
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@shipworthy/ai-sdk-llama-cpp",
-  "version": "0.2.4",
+  "version": "0.2.5",
   "description": "A minimal llama.cpp provider for the Vercel AI SDK implementing LanguageModelV3 and EmbeddingModelV3",
   "type": "module",
   "main": "./dist/index.js",
@@ -16,11 +16,12 @@
     "dist",
     "native",
     "scripts",
-    "CMakeLists.txt"
+    "CMakeLists.txt",
+    "llama.cpp.bundle"
   ],
   "llamaCpp": {
-    "repo": "https://github.com/…",
-    "commit": "…"
+    "repo": "https://github.com/ggml-org/llama.cpp.git",
+    "tag": "b7624"
   },
   "scripts": {
     "postinstall": "node scripts/postinstall.cjs",
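The llamaCpp block now pins an upstream release tag (b7624) rather than a bare commit. Since git clone --branch accepts tag names as well as branch names, this allows a shallow, single-ref clone up front; a minimal sketch of that behavior, using the values pinned above (the target directory name is illustrative):

// Sketch (not part of the package): shallow clone of a pinned release tag.
const { execSync } = require("child_process");

const repo = "https://github.com/ggml-org/llama.cpp.git";
const tag = "b7624"; // --branch accepts tags as well as branches
execSync(`git clone --depth 1 --branch ${tag} ${repo} llama.cpp`, {
  stdio: "inherit",
});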
package/scripts/postinstall.cjs
CHANGED

@@ -1,74 +1,96 @@
+// @ts-check
 const { execSync } = require("child_process");
 const fs = require("fs");
 const path = require("path");
 
-
-
-
-
-
-// console.error(`Detected platform: ${process.platform}`);
-// console.error("This package requires macOS for native compilation.\n");
-// process.exit(1);
-// }
-
-// Step 2: Check if git is available
-try {
-  execSync("git --version", { stdio: "ignore" });
-} catch (error) {
-  console.error("\n===========================================");
-  console.error("ERROR: git is required but not found");
-  console.error("===========================================\n");
-  console.error("Please install git to continue.");
-  console.error("  macOS: xcode-select --install");
-  console.error("  or: brew install git\n");
-  process.exit(1);
+if (process.platform !== "darwin") {
+  console.warn("===========================================\n");
+  console.warn(`Detected platform: ${process.platform}`);
+  console.warn("ai-sdk-llama-cpp requires macOS for native compilation.\n");
+  process.exit(0);
 }
 
-
-const …
-
-
-
-
+const ROOT_DIR = path.join(__dirname, "..")
+const LLAMA_CPP_DIR = path.join(ROOT_DIR, "llama.cpp")
+const LLAMA_CPP_BUNDLE = path.join(ROOT_DIR, "llama.cpp.bundle")
+
+function cloneLlamaCpp() {
+  if (fs.existsSync(LLAMA_CPP_DIR)) {
+    console.log("llama.cpp directory already exists, skipping clone");
+    return;
+  }
+
+  if (fs.existsSync(LLAMA_CPP_BUNDLE)) {
+    console.log("Cloning llama.cpp from llama.cpp.bundle");
+    execSync(`git clone ${LLAMA_CPP_BUNDLE} ${LLAMA_CPP_DIR}`, {
+      stdio: "inherit",
+    });
+    return;
+  }
+
+  createLlamaReleaseBundle()
+}
+
+function createLlamaReleaseBundle() {
   const packageJsonPath = path.join(__dirname, "..", "package.json");
   const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, "utf8"));
-  const { repo, commit } = packageJson.llamaCpp;
+  const { repo, tag } = packageJson.llamaCpp;
 
-  if (!repo || !commit) {
+  if (!repo || !tag) {
     console.error(
-      "ERROR: llamaCpp …
+      "ERROR: llamaCpp repo and tag must be defined in package.json"
     );
     process.exit(1);
   }
 
-
-  console.log(`Cloning llama.cpp at commit ${commit}...`);
-  try {
-    // Clone with depth 1 for faster download, then fetch the specific commit
-    execSync(`git clone --depth 1 ${repo} llama.cpp`, {
-      stdio: "inherit",
-      cwd: path.join(__dirname, ".."),
-    });
+  console.log(`Cloning llama.cpp from release ${tag}`);
 
-
-
-
-      cwd: llamaCppPath,
-    });
-    execSync(`git checkout ${commit}`, {
-      stdio: "inherit",
-      cwd: llamaCppPath,
-    });
+  execSync(`git clone --depth 1 --branch ${tag} ${repo} ${LLAMA_CPP_DIR}`, {
+    stdio: "inherit",
+  });
 
-
-
-
-
-
-
+  execSync(`git fetch --unshallow`, {
+    stdio: "inherit",
+    cwd: LLAMA_CPP_DIR,
+  });
+
+  // Squash all history into a single commit
+  const newSha = execSync(`git commit-tree HEAD^{tree} -m "Squashed"`, {
+    stdio: "pipe",
+    cwd: LLAMA_CPP_DIR,
+  });
+  execSync(`git reset --hard ${newSha}`, {
+    stdio: "inherit",
+    cwd: LLAMA_CPP_DIR,
+  });
+
+  // Delete all tags (reduces bundle size)
+  execSync(`git tag -l | xargs git tag -d`, {
+    stdio: "inherit",
+    cwd: LLAMA_CPP_DIR,
+  });
+
+  // Aggressive garbage collection (major size reduction)
+  execSync(`git gc --aggressive --prune=all`, {
+    stdio: "inherit",
+    cwd: LLAMA_CPP_DIR,
+  });
+
+  execSync(`git bundle create ../llama.cpp.bundle HEAD`, {
+    stdio: "inherit",
+    cwd: LLAMA_CPP_DIR,
+  });
+
+  console.log("llama.cpp cloned and bundled successfully.");
+}
+
+try {
+  cloneLlamaCpp()
+} catch (error) {
+  console.error("ERROR: Failed to clone llama.cpp");
+  console.error(error.message);
+  process.exit(1);
 }
 
-
-console.log("Building native llama.cpp bindings for macOS...");
+console.log("Building native llama.cpp bindings");
 execSync("npx cmake-js compile", { stdio: "inherit" });