pi-agent-extensions 0.2.2 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md
CHANGED
|
@@ -32,6 +32,7 @@ Original repository: https://github.com/mitsuhiko/agent-stuff
|
|
|
32
32
|
| **control** | RPC | Inter-session communication & control | ⚙️ Beta |
|
|
33
33
|
| **answer** | Tool | Structured Q&A for complex queries | ⚙️ Beta |
|
|
34
34
|
| **cwd_history** | Tracker | Tracks directory changes in context | ✅ Stable |
|
|
35
|
+
| **nvidia-nim** | Command | Nvidia NIM auth & config | ✅ Stable |
|
|
35
36
|
|
|
36
37
|
## Install
|
|
37
38
|
|
|
@@ -295,6 +296,13 @@ RPC-based session control. Allows sessions to talk to each other (e.g., a "manag
|
|
|
295
296
|
- Flag: `--session-control`
|
|
296
297
|
- Tool: `send_to_session`
|
|
297
298
|
|
|
299
|
+
**Nvidia NIM (`/nvidia-nim-auth`)**
|
|
300
|
+
Authenticate and configure Nvidia NIM as an LLM provider.
|
|
301
|
+
- Commands: `/nvidia-nim-auth` (alias: `/nvidia-auth`), `/nvidia-nim-models`
|
|
302
|
+
- Saves provider config to `~/.pi/nvidia-nim.json`
|
|
303
|
+
- Adds configured models to `~/.pi/agent/settings.json` `enabledModels` for scoped `/model` + Ctrl+P cycling
|
|
304
|
+
- Model IDs must be `org/model` (exactly one `/`), e.g. `moonshotai/kimi-k2.5` (not `nvidia/moonshotai/kimi-k2.5`)
|
|
305
|
+
|
|
298
306
|
## Development
|
|
299
307
|
|
|
300
308
|
```bash
|
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
# Nvidia NIM Extension
|
|
2
|
+
|
|
3
|
+
Authenticate and configure the Nvidia NIM provider for the Pi coding agent. Models you add appear directly in the `/model` picker.
|
|
4
|
+
|
|
5
|
+
## Commands
|
|
6
|
+
|
|
7
|
+
| Command | Description |
|
|
8
|
+
|---------|-------------|
|
|
9
|
+
| `/nvidia-nim-auth` | Full setup — API key + model editor |
|
|
10
|
+
| `/nvidia-auth` | Alias for `/nvidia-nim-auth` |
|
|
11
|
+
| `/nvidia-nim-models` | Add/edit models (keeps existing API key) |
|
|
12
|
+
|
|
13
|
+
## Usage
|
|
14
|
+
|
|
15
|
+
1. **Get an API key** from [build.nvidia.com](https://build.nvidia.com)
|
|
16
|
+
2. Run `/nvidia-nim-auth` in Pi
|
|
17
|
+
3. Paste your `nvapi-...` key
|
|
18
|
+
4. Add model IDs in the editor (one per line), e.g.:
|
|
19
|
+
```
|
|
20
|
+
meta/llama-3.1-405b-instruct
|
|
21
|
+
deepseek-ai/deepseek-r1
|
|
22
|
+
moonshotai/kimi-k2.5
|
|
23
|
+
nvidia/llama-3.1-nemotron-70b-instruct
|
|
24
|
+
```
|
|
25
|
+
5. Use `/model` to switch to any registered Nvidia model (also available in scoped model list / Ctrl+P cycling)
|
|
26
|
+
|
|
27
|
+
To add more models later without re-entering your key, use `/nvidia-nim-models`.
|
|
28
|
+
|
|
29
|
+
### Model ID format
|
|
30
|
+
|
|
31
|
+
Model IDs must be `org/model` (exactly one `/`).
|
|
32
|
+
|
|
33
|
+
✅ Valid:
|
|
34
|
+
- `moonshotai/kimi-k2.5`
|
|
35
|
+
- `nvidia/llama-3.1-nemotron-70b-instruct`
|
|
36
|
+
|
|
37
|
+
❌ Invalid:
|
|
38
|
+
- `nvidia/moonshotai/kimi-k2.5` (this is provider + org + model)
|
|
39
|
+
|
|
40
|
+
Invalid lines are ignored during parsing.
|
|
41
|
+
|
|
42
|
+
## Configuration
|
|
43
|
+
|
|
44
|
+
Config is saved to `~/.pi/nvidia-nim.json`:
|
|
45
|
+
|
|
46
|
+
```json
|
|
47
|
+
{
|
|
48
|
+
"apiKey": "nvapi-...",
|
|
49
|
+
"models": [
|
|
50
|
+
{ "id": "meta/llama-3.1-405b-instruct", "name": "Llama 3.1 405b Instruct", "reasoning": false },
|
|
51
|
+
{ "id": "deepseek-ai/deepseek-r1", "name": "Deepseek R1", "reasoning": true }
|
|
52
|
+
]
|
|
53
|
+
}
|
|
54
|
+
```
|
|
55
|
+
|
|
56
|
+
Models are registered via `pi.registerProvider()` on startup and after auth, so they show up in `/model` immediately.
|
|
57
|
+
|
|
58
|
+
The extension also updates `~/.pi/agent/settings.json` `enabledModels` entries for provider `nvidia`, so configured models appear in scoped `/model` and Ctrl+P cycling.
|
|
59
|
+
|
|
60
|
+
## Browse Models
|
|
61
|
+
|
|
62
|
+
See all available Nvidia NIM models at: [build.nvidia.com/models](https://build.nvidia.com/models)
|
|
@@ -0,0 +1,311 @@
|
|
|
1
|
+
import type { ExtensionAPI, ExtensionCommandContext } from "@mariozechner/pi-coding-agent";
|
|
2
|
+
import * as fs from "node:fs/promises";
|
|
3
|
+
import * as fsSync from "node:fs";
|
|
4
|
+
import * as path from "node:path";
|
|
5
|
+
import * as os from "node:os";
|
|
6
|
+
|
|
7
|
+
/** Persisted config shape (written to the path returned by getConfigPath()). */
export interface NvidiaNimConfig {
  // Nvidia NIM API key; validated elsewhere to usually start with "nvapi-".
  apiKey: string;
  // Models the user has registered for the nvidia provider.
  models: NvidiaModelEntry[];
}

/** One configured model entry. */
export interface NvidiaModelEntry {
  // Model ID in "org/model" form, e.g. "meta/llama-3.1-405b-instruct".
  id: string;
  // Display name derived from the ID (see formatModelName).
  name: string;
  // Whether the model is flagged as a reasoning model (heuristic in parseModelLines).
  reasoning: boolean;
}
|
|
18
|
+
|
|
19
|
+
const NVIDIA_BASE_URL = "https://integrate.api.nvidia.com/v1";
|
|
20
|
+
const NVIDIA_PROVIDER_NAME = "nvidia";
|
|
21
|
+
const MODEL_EDITOR_TEMPLATE = `# Nvidia NIM Models — one model ID per line
|
|
22
|
+
# Lines starting with # are ignored.
|
|
23
|
+
#
|
|
24
|
+
# Browse available models at: https://build.nvidia.com/models
|
|
25
|
+
#
|
|
26
|
+
# Examples:
|
|
27
|
+
# meta/llama-3.1-405b-instruct
|
|
28
|
+
# nvidia/llama-3.1-nemotron-70b-instruct
|
|
29
|
+
# deepseek-ai/deepseek-r1
|
|
30
|
+
# google/gemma-2-27b-it
|
|
31
|
+
# mistralai/mixtral-8x22b-instruct-v0.1
|
|
32
|
+
# qwen/qwen2.5-72b-instruct
|
|
33
|
+
`;
|
|
34
|
+
|
|
35
|
+
export function getConfigPath(): string {
|
|
36
|
+
return path.join(os.homedir(), ".pi", "nvidia-nim.json");
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
export async function loadConfig(): Promise<NvidiaNimConfig | null> {
|
|
40
|
+
try {
|
|
41
|
+
const content = await fs.readFile(getConfigPath(), "utf-8");
|
|
42
|
+
const parsed = JSON.parse(content);
|
|
43
|
+
if (parsed.apiKey && Array.isArray(parsed.models)) {
|
|
44
|
+
return parsed as NvidiaNimConfig;
|
|
45
|
+
}
|
|
46
|
+
return null;
|
|
47
|
+
} catch {
|
|
48
|
+
return null;
|
|
49
|
+
}
|
|
50
|
+
}
|
|
51
|
+
|
|
52
|
+
/** Synchronous version for use during extension init (before Pi finishes loading) */
|
|
53
|
+
export function loadConfigSync(): NvidiaNimConfig | null {
|
|
54
|
+
try {
|
|
55
|
+
const content = fsSync.readFileSync(getConfigPath(), "utf-8");
|
|
56
|
+
const parsed = JSON.parse(content);
|
|
57
|
+
if (parsed.apiKey && Array.isArray(parsed.models)) {
|
|
58
|
+
return parsed as NvidiaNimConfig;
|
|
59
|
+
}
|
|
60
|
+
return null;
|
|
61
|
+
} catch {
|
|
62
|
+
return null;
|
|
63
|
+
}
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
export async function saveConfig(config: NvidiaNimConfig): Promise<void> {
|
|
67
|
+
const configPath = getConfigPath();
|
|
68
|
+
await fs.mkdir(path.dirname(configPath), { recursive: true });
|
|
69
|
+
await fs.writeFile(configPath, JSON.stringify(config, null, 2), "utf-8");
|
|
70
|
+
}
|
|
71
|
+
|
|
72
|
+
/** Parse model IDs from editor text, ignoring comments and blank lines.
|
|
73
|
+
* Model IDs must be in "org/model" format (exactly one "/").
|
|
74
|
+
* Lines with more than one "/" are skipped (e.g. "nvidia/moonshotai/kimi-k2.5" is invalid). */
|
|
75
|
+
export function parseModelLines(text: string): NvidiaModelEntry[] {
|
|
76
|
+
const seen = new Set<string>();
|
|
77
|
+
return text
|
|
78
|
+
.split("\n")
|
|
79
|
+
.map((line) => line.trim())
|
|
80
|
+
.filter((line) => line.length > 0 && !line.startsWith("#"))
|
|
81
|
+
.filter((line) => (line.match(/\//g) || []).length === 1)
|
|
82
|
+
.reduce<NvidiaModelEntry[]>((acc, id) => {
|
|
83
|
+
if (!seen.has(id)) {
|
|
84
|
+
seen.add(id);
|
|
85
|
+
acc.push({
|
|
86
|
+
id,
|
|
87
|
+
name: formatModelName(id),
|
|
88
|
+
reasoning: /deepseek-r1|reasoning/i.test(id),
|
|
89
|
+
});
|
|
90
|
+
}
|
|
91
|
+
return acc;
|
|
92
|
+
}, []);
|
|
93
|
+
}
|
|
94
|
+
|
|
95
|
+
/** Turn "meta/llama-3.1-405b-instruct" → "Llama 3.1 405B Instruct" */
|
|
96
|
+
function formatModelName(id: string): string {
|
|
97
|
+
const base = id.includes("/") ? id.split("/").pop()! : id;
|
|
98
|
+
return base
|
|
99
|
+
.replace(/[-_]/g, " ")
|
|
100
|
+
.replace(/\b\w/g, (c) => c.toUpperCase());
|
|
101
|
+
}
|
|
102
|
+
|
|
103
|
+
/** Get the path to ~/.pi/agent/settings.json */
|
|
104
|
+
export function getAgentSettingsPath(): string {
|
|
105
|
+
return path.join(os.homedir(), ".pi", "agent", "settings.json");
|
|
106
|
+
}
|
|
107
|
+
|
|
108
|
+
/**
|
|
109
|
+
* Add nvidia model IDs to enabledModels in ~/.pi/agent/settings.json
|
|
110
|
+
* so they show up in the scoped /model view and Ctrl+P cycling.
|
|
111
|
+
* Removes any stale nvidia/ entries first, then appends the new ones.
|
|
112
|
+
*/
|
|
113
|
+
export async function updateEnabledModels(models: NvidiaModelEntry[]): Promise<void> {
|
|
114
|
+
const settingsPath = getAgentSettingsPath();
|
|
115
|
+
|
|
116
|
+
let settings: Record<string, any> = {};
|
|
117
|
+
try {
|
|
118
|
+
const content = await fs.readFile(settingsPath, "utf-8");
|
|
119
|
+
if (content.trim()) {
|
|
120
|
+
settings = JSON.parse(content);
|
|
121
|
+
}
|
|
122
|
+
} catch (err: any) {
|
|
123
|
+
if (err.code !== "ENOENT") throw err;
|
|
124
|
+
}
|
|
125
|
+
|
|
126
|
+
const existing: string[] = Array.isArray(settings.enabledModels) ? settings.enabledModels : [];
|
|
127
|
+
|
|
128
|
+
// Remove old nvidia/ entries
|
|
129
|
+
const filtered = existing.filter((id: string) => !id.startsWith(`${NVIDIA_PROVIDER_NAME}/`));
|
|
130
|
+
|
|
131
|
+
// Add new nvidia models with provider prefix
|
|
132
|
+
const nvidiaIds = models.map((m) => `${NVIDIA_PROVIDER_NAME}/${m.id}`);
|
|
133
|
+
settings.enabledModels = [...filtered, ...nvidiaIds];
|
|
134
|
+
|
|
135
|
+
await fs.mkdir(path.dirname(settingsPath), { recursive: true });
|
|
136
|
+
await fs.writeFile(settingsPath, JSON.stringify(settings, null, 2), "utf-8");
|
|
137
|
+
}
|
|
138
|
+
|
|
139
|
+
/**
|
|
140
|
+
* Fetch available model IDs from the Nvidia NIM API.
|
|
141
|
+
* Returns the set of valid model IDs, or null on failure.
|
|
142
|
+
*/
|
|
143
|
+
export async function fetchAvailableModels(apiKey: string): Promise<Set<string> | null> {
|
|
144
|
+
try {
|
|
145
|
+
const res = await fetch(`${NVIDIA_BASE_URL}/models`, {
|
|
146
|
+
headers: { Authorization: `Bearer ${apiKey}` },
|
|
147
|
+
});
|
|
148
|
+
if (!res.ok) return null;
|
|
149
|
+
const data = (await res.json()) as { data?: { id: string }[] };
|
|
150
|
+
if (!data.data) return null;
|
|
151
|
+
return new Set(data.data.map((m) => m.id));
|
|
152
|
+
} catch {
|
|
153
|
+
return null;
|
|
154
|
+
}
|
|
155
|
+
}
|
|
156
|
+
|
|
157
|
+
/** Register the nvidia provider with pi so models appear in /model */
|
|
158
|
+
export function registerProvider(pi: ExtensionAPI, config: NvidiaNimConfig): void {
|
|
159
|
+
pi.registerProvider("nvidia", {
|
|
160
|
+
baseUrl: NVIDIA_BASE_URL,
|
|
161
|
+
apiKey: config.apiKey,
|
|
162
|
+
api: "openai-completions",
|
|
163
|
+
models: config.models.map((m) => ({
|
|
164
|
+
id: m.id,
|
|
165
|
+
name: m.name,
|
|
166
|
+
reasoning: m.reasoning,
|
|
167
|
+
input: ["text"] as ("text" | "image")[],
|
|
168
|
+
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
|
169
|
+
contextWindow: 128000,
|
|
170
|
+
maxTokens: 16384,
|
|
171
|
+
})),
|
|
172
|
+
});
|
|
173
|
+
}
|
|
174
|
+
|
|
175
|
+
/**
 * Extension entry point: registers the nvidia provider from saved config
 * (if any) and installs the /nvidia-nim-auth, /nvidia-auth and
 * /nvidia-nim-models commands.
 */
export default function nvidiaNimExtension(pi: ExtensionAPI) {
  // Register synchronously at init so models are available before Pi finishes loading
  const savedConfig = loadConfigSync();
  if (savedConfig && savedConfig.models.length > 0) {
    registerProvider(pi, savedConfig);
  }

  // --- /nvidia-nim-auth: full setup (API key + models) ---
  const authHandler = async (args: string | undefined, ctx: ExtensionCommandContext) => {
    // All prompts below need interactive UI; bail out in headless mode.
    if (!ctx.hasUI) {
      console.log("This command requires interactive mode.");
      return;
    }

    const existing = await loadConfig();

    // Step 1: API Key
    let apiKey = await ctx.ui.input(
      "Nvidia NIM — Enter API Key",
      existing ? "(current key saved — paste new key or press Enter to keep)" : "Paste your nvapi-... key from build.nvidia.com",
    );

    // Empty input keeps the previously saved key; otherwise a key is required.
    if (!apiKey?.trim() && existing?.apiKey) {
      apiKey = existing.apiKey;
    } else if (!apiKey?.trim()) {
      ctx.ui.notify("Setup cancelled — API key is required.", "error");
      return;
    }

    apiKey = apiKey.trim();
    // Soft check only: warn on an unusual prefix but let the user proceed.
    if (!apiKey.startsWith("nvapi-")) {
      const proceed = await ctx.ui.confirm(
        "Nvidia NIM — API Key Warning",
        `Key doesn't start with "nvapi-". Nvidia NIM keys usually do.\n\nContinue anyway?`,
      );
      if (!proceed) return;
    }

    // Step 2: Models via multi-line editor
    // Prefill with existing IDs, or a sensible default on first run.
    const existingModelIds = existing?.models.map((m) => m.id).join("\n") ?? "";
    const prefill = MODEL_EDITOR_TEMPLATE + "\n" + (existingModelIds || "meta/llama-3.1-405b-instruct") + "\n";

    const modelText = await ctx.ui.editor("Nvidia NIM — Edit Models (one per line)", prefill);

    if (!modelText?.trim()) {
      ctx.ui.notify("Setup cancelled — at least one model is required.", "error");
      return;
    }

    let models = parseModelLines(modelText);
    if (models.length === 0) {
      ctx.ui.notify("No valid model IDs found. Add at least one non-comment line.", "error");
      return;
    }

    // Validate model IDs against the Nvidia API
    // (validation failure — null — is ignored; only a successful listing gates).
    ctx.ui.notify("Validating model IDs against Nvidia NIM API...", "info");
    const available = await fetchAvailableModels(apiKey);
    if (available) {
      const invalid = models.filter((m) => !available.has(m.id));
      if (invalid.length > 0) {
        const names = invalid.map((m) => m.id).join(", ");
        const proceed = await ctx.ui.confirm(
          "Nvidia NIM — Invalid Model IDs",
          `These model IDs were not found on the API:\n\n  ${names}\n\nSave anyway (they'll 404), or cancel to fix them?`,
        );
        if (!proceed) return;
      }
    }

    // Save + register + add to scoped models
    const config: NvidiaNimConfig = { apiKey, models };
    try {
      await saveConfig(config);
      registerProvider(pi, config);
      await updateEnabledModels(models);
      const names = models.map((m) => m.id).join(", ");
      ctx.ui.notify(`Nvidia NIM configured — ${models.length} model(s): ${names}. Available in /model.`, "info");
    } catch (error) {
      ctx.ui.notify(`Failed to save: ${error instanceof Error ? error.message : String(error)}`, "error");
    }
  };

  // --- /nvidia-nim-models: quick add/edit models without re-entering key ---
  const modelsHandler = async (args: string | undefined, ctx: ExtensionCommandContext) => {
    if (!ctx.hasUI) {
      console.log("This command requires interactive mode.");
      return;
    }

    // Requires an existing config — the saved API key is reused as-is.
    const existing = await loadConfig();
    if (!existing) {
      ctx.ui.notify("Run /nvidia-nim-auth first to set up your API key.", "error");
      return;
    }

    const existingModelIds = existing.models.map((m) => m.id).join("\n");
    const prefill = MODEL_EDITOR_TEMPLATE + "\n" + existingModelIds + "\n";

    const modelText = await ctx.ui.editor("Nvidia NIM — Edit Models (one per line)", prefill);
    if (!modelText?.trim()) {
      ctx.ui.notify("Cancelled — models unchanged.", "info");
      return;
    }

    const models = parseModelLines(modelText);
    if (models.length === 0) {
      ctx.ui.notify("No valid model IDs found. Models unchanged.", "error");
      return;
    }

    // NOTE: unlike the auth flow, model IDs are NOT validated against the API here.
    const config: NvidiaNimConfig = { apiKey: existing.apiKey, models };
    try {
      await saveConfig(config);
      registerProvider(pi, config);
      await updateEnabledModels(models);
      ctx.ui.notify(`Nvidia NIM models updated — ${models.length} model(s). Available in /model.`, "info");
    } catch (error) {
      ctx.ui.notify(`Failed to save: ${error instanceof Error ? error.message : String(error)}`, "error");
    }
  };

  pi.registerCommand("nvidia-nim-auth", {
    description: "Configure Nvidia NIM API key and models",
    handler: authHandler,
  });

  pi.registerCommand("nvidia-auth", {
    description: "Configure Nvidia NIM (alias for /nvidia-nim-auth)",
    handler: authHandler,
  });

  pi.registerCommand("nvidia-nim-models", {
    description: "Add or edit Nvidia NIM models",
    handler: modelsHandler,
  });
}
|
|
@@ -93,7 +93,8 @@ export default function (pi: ExtensionAPI) {
|
|
|
93
93
|
if (ctx.hasUI) {
|
|
94
94
|
ctx.ui.notify(`👋 ${msg}`, "info");
|
|
95
95
|
}
|
|
96
|
-
|
|
96
|
+
// Use setImmediate to ensure shutdown happens after command handler completes
|
|
97
|
+
setImmediate(() => ctx.shutdown());
|
|
97
98
|
},
|
|
98
99
|
});
|
|
99
100
|
|
|
@@ -104,7 +105,8 @@ export default function (pi: ExtensionAPI) {
|
|
|
104
105
|
if (ctx.hasUI) {
|
|
105
106
|
ctx.ui.notify(`👋 ${msg}`, "info");
|
|
106
107
|
}
|
|
107
|
-
|
|
108
|
+
// Use setImmediate to ensure shutdown happens after command handler completes
|
|
109
|
+
setImmediate(() => ctx.shutdown());
|
|
108
110
|
},
|
|
109
111
|
});
|
|
110
112
|
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "pi-agent-extensions",
|
|
3
|
-
"version": "0.
|
|
3
|
+
"version": "0.3.0",
|
|
4
4
|
"description": "Collection of extensions for pi coding agent",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"repository": {
|
|
@@ -45,7 +45,8 @@
|
|
|
45
45
|
"./extensions/cwd-history/index.ts",
|
|
46
46
|
"./extensions/session-breakdown/index.ts",
|
|
47
47
|
"./extensions/todos/index.ts",
|
|
48
|
-
"./extensions/whimsical/index.ts"
|
|
48
|
+
"./extensions/whimsical/index.ts",
|
|
49
|
+
"./extensions/nvidia-nim/index.ts"
|
|
49
50
|
],
|
|
50
51
|
"themes": [
|
|
51
52
|
"./themes/nightowl.json",
|