@vtstech/pi-ollama-sync 1.0.4 → 1.0.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +40 -0
- package/ollama-sync.js +31 -8
- package/package.json +3 -3
package/README.md
ADDED
@@ -0,0 +1,40 @@
+# @vtstech/pi-ollama-sync
+
+Ollama sync extension for the [Pi Coding Agent](https://github.com/badlogic/pi-mono).
+
+Auto-populate `models.json` with all available Ollama models — works with local and remote instances.
+
+## Install
+
+```bash
+pi install "npm:@vtstech/pi-ollama-sync"
+```
+
+## Commands
+
+```bash
+/ollama-sync                          Sync from models.json URL (or localhost)
+/ollama-sync https://your-tunnel-url  Sync from a specific remote URL
+```
+
+## Features
+
+- Queries Ollama `/api/tags` for available models (local or remote)
+- Writes the actual Ollama URL back into `models.json` so other extensions pick it up
+- URL priority: CLI argument → existing `models.json` baseUrl → `OLLAMA_HOST` env → localhost
+- Preserves existing provider config (apiKey, compat settings)
+- Defaults to `openai-completions` API mode
+- Sorts models by size (smallest first)
+- Auto-detects reasoning-capable models (deepseek-r1, qwq, o1, o3, think, reason)
+- Merges with existing per-model settings
+- Per-model metadata in sync report (parameter size, quantization level, model family)
+- Registered as both `/ollama-sync` slash command and `ollama_sync` tool
+
+## Links
+
+- [Full Documentation](https://github.com/VTSTech/pi-coding-agent#ollama-sync-ollama-syncts)
+- [Changelog](https://github.com/VTSTech/pi-coding-agent/blob/main/CHANGELOG.md)
+
+## License
+
+MIT — [VTSTech](https://www.vts-tech.org)
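The README's URL-priority bullet describes a fallback chain. As a rough illustration only (the actual resolution lives in `getOllamaBaseUrl` from `@vtstech/pi-shared/ollama`, which the extension imports but this diff does not show), the order could look like:

```js
// Hypothetical sketch of the README's URL priority order; the real logic is
// getOllamaBaseUrl in @vtstech/pi-shared/ollama, not shown in this diff.
function resolveOllamaBaseUrl(cliArg, modelsJson) {
  return (
    cliArg ??                                  // 1. explicit CLI argument
    modelsJson?.providers?.ollama?.baseUrl ??  // 2. baseUrl already in models.json
    process.env.OLLAMA_HOST ??                 // 3. OLLAMA_HOST environment variable
    "http://localhost:11434"                   // 4. Ollama's default local endpoint
  );
}
```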
package/ollama-sync.js
CHANGED
@@ -2,14 +2,15 @@
 import {
   MODELS_JSON_PATH as MODELS_FILE,
   fetchOllamaModels,
+  fetchContextLengthsBatched,
   readModelsJson,
   writeModelsJson,
   isReasoningModel,
   getOllamaBaseUrl
 } from "@vtstech/pi-shared/ollama";
-import { section, ok, warn, info } from "@vtstech/pi-shared/format";
+import { section, ok, warn, info, bytesHuman, estimateVram } from "@vtstech/pi-shared/format";
 var BRANDING = [
-  ` \u26A1 Pi Ollama Sync v1.0.
+  ` \u26A1 Pi Ollama Sync v1.0.6`,
   ` Written by VTSTech`,
   ` GitHub: https://github.com/VTSTech`,
   ` Website: www.vts-tech.org`

@@ -27,13 +28,16 @@ function getProviderConfig(existing) {
     }
   };
 }
-function buildModelEntry(m) {
+function buildModelEntry(m, contextLength) {
+  const estimatedSize = estimateVram(m.details.parameter_size, m.details.quantization_level);
   return {
     id: m.name,
     reasoning: isReasoningModel(m.name),
     parameterSize: m.details.parameter_size,
     quantizationLevel: m.details.quantization_level,
-    modelFamily: m.details.family || m.details.families?.[0] || "unknown"
+    modelFamily: m.details.family || m.details.families?.[0] || "unknown",
+    contextLength,
+    estimatedSize
   };
 }
 function mergeModels(newModels, oldModels) {

@@ -74,7 +78,14 @@ function ollama_sync_temp_default(pi) {
       return;
     }
     const sorted = [...models].sort((a, b) => a.size - b.size);
-
+    ctx.ui.setStatus("ollama-sync", "Fetching model details...");
+    const contextMap = await fetchContextLengthsBatched(
+      ollamaBaseUrl,
+      sorted.map((m) => m.name)
+    );
+    const newModels = sorted.map(
+      (m) => buildModelEntry(m, contextMap.get(m.name))
+    );
     const oldIds = new Set(
       existing.providers["ollama"]?.models?.map((m) => m.id) ?? []
     );

@@ -96,8 +107,10 @@ function ollama_sync_temp_default(pi) {
     lines.push(section("Synced Models"));
     for (const m of newModels) {
       lines.push(ok(m.id));
+      const ctxStr = m.contextLength != null ? m.contextLength.toLocaleString() : "?";
+      const sizeStr = m.estimatedSize ? bytesHuman(m.estimatedSize) : "?";
       lines.push(
-        ` Params: ${m.parameterSize ?? "?"} \xB7 Quant: ${m.quantizationLevel ?? "?"} \xB7 Family: ${m.modelFamily ?? "?"}`
+        ` Params: ${m.parameterSize ?? "?"} \xB7 Quant: ${m.quantizationLevel ?? "?"} \xB7 Family: ${m.modelFamily ?? "?"} \xB7 Context: ${ctxStr} \xB7 VRAM: ~${sizeStr}`
       );
     }
     if (added.length > 0 || removed.length > 0) {

@@ -153,7 +166,13 @@ function ollama_sync_temp_default(pi) {
     try {
       const models = await fetchOllamaModels(ollamaBaseUrl);
       const sorted = [...models].sort((a, b) => a.size - b.size);
-      const
+      const contextMap = await fetchContextLengthsBatched(
+        ollamaBaseUrl,
+        sorted.map((m) => m.name)
+      );
+      const newModels = sorted.map(
+        (m) => buildModelEntry(m, contextMap.get(m.name))
+      );
       const mergedModels = mergeModels(
         newModels,
         existing.providers["ollama"]?.models ?? []

@@ -165,7 +184,11 @@ function ollama_sync_temp_default(pi) {
       };
       writeModelsJson(existing);
       const modelDetails = newModels.map(
-        (m) =>
+        (m) => {
+          const ctxStr = m.contextLength ?? "?";
+          const sizeStr = m.estimatedSize ? bytesHuman(m.estimatedSize) : "?";
+          return ` \u2022 ${m.id} (${m.parameterSize}, ${m.quantizationLevel}, ctx: ${ctxStr}, ~${sizeStr})`;
+        }
       ).join("\n");
       return {
         content: [
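The new `fetchContextLengthsBatched` and `estimateVram` helpers ship in `@vtstech/pi-shared` and are not part of this diff. For orientation, a minimal unbatched sketch of the context-length lookup, assuming it reads Ollama's `/api/show` endpoint (which reports `model_info` keys such as `llama.context_length`):

```js
// Minimal, unbatched sketch of a context-length lookup against Ollama;
// the shipped fetchContextLengthsBatched helper lives in
// @vtstech/pi-shared/ollama and may differ (batching, retries, caching).
async function fetchContextLengths(baseUrl, names) {
  const map = new Map();
  for (const name of names) {
    const res = await fetch(`${baseUrl}/api/show`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ model: name })
    });
    if (!res.ok) continue; // missing entries stay undefined, matching contextMap.get()
    const info = (await res.json()).model_info ?? {};
    const arch = info["general.architecture"]; // e.g. "llama", "qwen2"
    map.set(name, info[`${arch}.context_length`]);
  }
  return map;
}
```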
package/package.json
CHANGED
@@ -1,9 +1,9 @@
 {
   "name": "@vtstech/pi-ollama-sync",
-  "version": "1.0.4",
+  "version": "1.0.6",
   "description": "Ollama model sync extension for Pi Coding Agent",
   "main": "ollama-sync.js",
-  "keywords": ["pi-
+  "keywords": ["pi-extensions"],
   "license": "MIT",
   "access": "public",
   "type": "module",

@@ -14,7 +14,7 @@
     "url": "https://github.com/VTSTech/pi-coding-agent"
   },
   "dependencies": {
-    "@vtstech/pi-shared": "1.0.
+    "@vtstech/pi-shared": "1.0.6"
   },
   "peerDependencies": {
     "@mariozechner/pi-coding-agent": ">=0.66"