@lexmanh/shed-agent 0.2.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,279 @@
1
+ import { CleanableItem } from '@lexmanh/shed-core';
2
+
3
/**
 * AIProvider — unified interface for AI backends.
 *
 * Implemented by AnthropicProvider, GeminiProvider and the
 * OpenAI-compatible family (openai, groq, mistral, openrouter, ollama).
 */
interface AIMessage {
    /** Author of the turn; 'system' entries carry the system prompt. */
    readonly role: 'user' | 'assistant' | 'system';
    /** Plain-text message body. */
    readonly content: string;
}
/** A tool advertised to the model for function calling. */
interface AITool {
    /** Wire name of the tool (e.g. "list_projects"). */
    readonly name: string;
    /** Human/model-readable explanation of what the tool does. */
    readonly description: string;
    /** JSON-Schema object describing the tool's parameters. */
    readonly inputSchema: Record<string, unknown>;
}
/** One tool invocation requested by the model. */
interface AIToolCall {
    /** Provider-assigned call id (synthesised for providers without one). */
    readonly id: string;
    /** Name of the tool being invoked. */
    readonly name: string;
    /** Parsed arguments for the call. */
    readonly input: Record<string, unknown>;
}
/** Provider-neutral response returned by every chat() implementation. */
interface AIResponse {
    /** Concatenated text content of the reply. */
    readonly text: string;
    /** Tool calls the model wants executed (empty when none). */
    readonly toolCalls: readonly AIToolCall[];
    /** Why generation stopped, normalised across providers. */
    readonly stopReason: 'end' | 'tool_use' | 'max_tokens';
    /** Token accounting for this single request/response pair. */
    readonly usage: {
        readonly inputTokens: number;
        readonly outputTokens: number;
    };
}
/** Uniform facade over a single AI backend. */
interface AIProvider {
    /** Stable provider identifier (e.g. "anthropic", "ollama"). */
    readonly name: string;
    /** True when requests never leave the machine (e.g. Ollama). */
    readonly isLocal: boolean;
    /**
     * Send a conversation to the AI and get a response.
     * Supports function calling via tools parameter.
     */
    chat(args: {
        messages: readonly AIMessage[];
        tools?: readonly AITool[];
        maxTokens?: number;
        signal?: AbortSignal;
    }): Promise<AIResponse>;
}
/**
 * Privacy preview — what will be sent to the AI provider.
 * Shown to the user BEFORE the request leaves the machine (unless provider is local).
 */
interface PrivacyPreview {
    /** Name of the provider the data would be sent to. */
    readonly providerName: string;
    /** True for on-machine providers (no consent prompt is shown then). */
    readonly isLocal: boolean;
    /** Human-readable categories of data that WILL be transmitted. */
    readonly dataIncluded: readonly string[];
    /** Human-readable categories of data that will NOT be transmitted. */
    readonly dataExcluded: readonly string[];
    /** Rough token estimate for the initial request. */
    readonly estimatedTokens: number;
}
56
+
57
/** Anthropic Messages API backend (@anthropic-ai/sdk). */
declare class AnthropicProvider implements AIProvider {
    readonly name = "anthropic";
    readonly isLocal = false;
    /** Underlying SDK client. */
    private readonly client;
    /** Model id used for every request. */
    private readonly model;
    constructor(apiKey: string, model?: string);
    chat(args: {
        messages: readonly AIMessage[];
        tools?: readonly AITool[];
        maxTokens?: number;
        signal?: AbortSignal;
    }): Promise<AIResponse>;
}

/** Google Gemini backend (@google/genai). */
declare class GeminiProvider implements AIProvider {
    readonly name = "gemini";
    readonly isLocal = false;
    /** Underlying SDK client. */
    private readonly client;
    /** Model id used for every request. */
    private readonly model;
    constructor(apiKey: string, model?: string);
    chat(args: {
        messages: readonly AIMessage[];
        tools?: readonly AITool[];
        maxTokens?: number;
        signal?: AbortSignal;
    }): Promise<AIResponse>;
}

/**
 * Base provider for OpenAI-compatible APIs.
 * Used by OpenAI, Groq, Mistral, Ollama, OpenRouter — all share the same
 * chat completions API shape, differing only in baseURL and available models.
 */
interface OpenAICompatibleOptions {
    readonly apiKey: string;
    readonly model: string;
    /** Endpoint override; omitted for OpenAI's default endpoint. */
    readonly baseURL?: string;
    /** Value exposed as AIProvider.name. */
    readonly providerName: string;
    /** Defaults to false; true for on-machine endpoints (Ollama). */
    readonly isLocal?: boolean;
}
declare class OpenAICompatibleProvider implements AIProvider {
    readonly name: string;
    readonly isLocal: boolean;
    /** Underlying openai SDK client (works against any compatible baseURL). */
    private readonly client;
    private readonly model;
    constructor(options: OpenAICompatibleOptions);
    chat(args: {
        messages: readonly AIMessage[];
        tools?: readonly AITool[];
        maxTokens?: number;
        signal?: AbortSignal;
    }): Promise<AIResponse>;
}

/** Groq — OpenAI-compatible endpoint at api.groq.com. */
declare class GroqProvider extends OpenAICompatibleProvider {
    constructor(apiKey: string, model?: string);
}

/** Mistral — OpenAI-compatible endpoint at api.mistral.ai. */
declare class MistralProvider extends OpenAICompatibleProvider {
    constructor(apiKey: string, model?: string);
}

/** Ollama — local daemon exposing an OpenAI-compatible /v1 API; isLocal. */
declare class OllamaProvider extends OpenAICompatibleProvider {
    constructor(model?: string, endpoint?: string);
}

/** OpenAI proper — uses the SDK's default base URL. */
declare class OpenAIProvider extends OpenAICompatibleProvider {
    constructor(apiKey: string, model?: string);
}

/**
 * OpenRouter — aggregates 200+ models behind a single API key.
 * Default model: google/gemini-2.5-pro (strong, cost-effective).
 * Users can set any model slug from openrouter.ai/models.
 */
declare class OpenRouterProvider extends OpenAICompatibleProvider {
    constructor(apiKey: string, model?: string);
}
136
+
137
/**
 * Tool schemas used by both the built-in AI (agent package) and MCP server.
 * These are READ-ONLY tools — AI cannot execute cleanup directly.
 * Cleanup requires explicit user confirmation in the CLI.
 */
declare const TOOLS: {
    /** Enumerate projects (and global caches) under a root path. */
    readonly listProjects: {
        name: string;
        description: string;
        inputSchema: {
            type: string;
            properties: {
                root: {
                    type: string;
                    description: string;
                };
                maxDepth: {
                    type: string;
                    description: string;
                    default: number;
                };
            };
            required: string[];
        };
    };
    /** Detailed single-project analysis: items, risk tiers, safety checks. */
    readonly analyzeProject: {
        name: string;
        description: string;
        inputSchema: {
            type: string;
            properties: {
                path: {
                    type: string;
                    description: string;
                };
            };
            required: string[];
        };
    };
    /** Dry-run estimate of bytes freed by cleaning the given item ids. */
    readonly estimateCleanup: {
        name: string;
        description: string;
        inputSchema: {
            type: string;
            properties: {
                itemIds: {
                    type: string;
                    items: {
                        type: string;
                    };
                };
                includeRed: {
                    type: string;
                    default: boolean;
                };
            };
            required: string[];
        };
    };
    /** Free/used/total disk space for a path's filesystem. */
    readonly getDiskUsage: {
        name: string;
        description: string;
        inputSchema: {
            type: string;
            properties: {
                path: {
                    type: string;
                    description: string;
                };
            };
        };
    };
};
/** TOOLS flattened into the AITool shape handed to AIProvider.chat(). */
declare const ALL_TOOLS: readonly AITool[];
212
+
213
/**
 * ToolExecutor — bridges AI tool calls to shed-core Scanner.
 * All operations are READ-ONLY. AI cannot trigger cleanup.
 */

/** Every tool result is a plain JSON-serialisable object. */
type ToolResult = Record<string, unknown>;
/**
 * Execute one named tool call with its parsed input.
 * Unknown tool names and runtime failures yield `{ error: string }`
 * rather than throwing.
 *
 * @param allScannedItems Items from a previous scan; required by the
 *   "estimate_cleanup" tool to resolve item ids back to paths.
 */
declare function executeToolCall(name: string, input: Record<string, unknown>, allScannedItems?: readonly CleanableItem[]): Promise<ToolResult>;
220
+
221
/**
 * ExplainSession — orchestrates a single AI analysis session.
 *
 * Flow:
 * 1. Build privacy preview (what data will be sent)
 * 2. Show preview and ask user consent (unless provider is local)
 * 3. Run agentic loop: AI calls tools → executor resolves → AI responds
 * 4. Return final explanation text
 *
 * Token budget: warn at 40k, hard stop at 100k (CLAUDE.md rule 8.4).
 */

interface ExplainSessionOptions {
    /** Backend used for every chat round trip. */
    readonly provider: AIProvider;
    /** Root path the session reports in its privacy preview. */
    readonly scanRoot: string;
    /** Items from a previous scan, forwarded to tool execution. */
    readonly scannedItems?: readonly CleanableItem[];
    /** Called to show privacy preview and get user consent. Return false to abort. */
    readonly onPrivacyPrompt: (preview: PrivacyPreview) => Promise<boolean>;
    /** Called when token warning threshold is reached. */
    readonly onTokenWarning?: (used: number) => void;
    /** System prompt override. */
    readonly systemPrompt?: string;
}
/** Outcome of ExplainSession.run(). */
interface ExplainResult {
    /** Final explanation text ("" when the user declined consent). */
    readonly text: string;
    /** Prompt tokens consumed across all loop iterations. */
    readonly totalInputTokens: number;
    /** Completion tokens produced across all loop iterations. */
    readonly totalOutputTokens: number;
    /** True when stopped early: consent denied, token budget, or loop cap. */
    readonly aborted: boolean;
}
declare class ExplainSession {
    private readonly options;
    constructor(options: ExplainSessionOptions);
    /** Run the full session for one user question. */
    run(userQuestion: string): Promise<ExplainResult>;
}
255
+
256
/**
 * Factory to instantiate the correct AIProvider from shed config.
 * API keys are stored in the OS keychain via keytar.
 *
 * Supported providers: anthropic, openai, gemini, groq, mistral, openrouter, ollama
 */

type ProviderName = 'anthropic' | 'openai' | 'gemini' | 'groq' | 'mistral' | 'openrouter' | 'ollama';
/** Every supported provider name. */
declare const PROVIDER_NAMES: ProviderName[];
/** Default model used for each provider when none is configured. */
declare const DEFAULT_MODELS: Record<ProviderName, string>;
/** Read a provider's API key from the keychain (env-var fallback). */
declare function getStoredApiKey(provider: ProviderName): Promise<string | null>;
/** Store a provider's API key in the keychain; throws if unavailable. */
declare function setStoredApiKey(provider: ProviderName, key: string): Promise<void>;
/** Best-effort removal of a provider's API key from the keychain. */
declare function deleteStoredApiKey(provider: ProviderName): Promise<void>;
interface CreateProviderOptions {
    readonly provider: ProviderName;
    readonly model?: string;
    /** For ollama: base URL override */
    readonly ollamaEndpoint?: string;
    /** API key override (skips keychain lookup) */
    readonly apiKey?: string;
}
/** Instantiate the configured provider, resolving its API key first. */
declare function createProvider(options: CreateProviderOptions): Promise<AIProvider>;
278
+
279
+ export { type AIMessage, type AIProvider, type AIResponse, type AITool, type AIToolCall, ALL_TOOLS, AnthropicProvider, type CreateProviderOptions, DEFAULT_MODELS, type ExplainResult, ExplainSession, type ExplainSessionOptions, GeminiProvider, GroqProvider, MistralProvider, OllamaProvider, type OpenAICompatibleOptions, OpenAICompatibleProvider, OpenAIProvider, OpenRouterProvider, PROVIDER_NAMES, type PrivacyPreview, type ProviderName, TOOLS, type ToolResult, createProvider, deleteStoredApiKey, executeToolCall, getStoredApiKey, setStoredApiKey };
package/dist/index.js ADDED
@@ -0,0 +1,588 @@
1
+ // src/providers/anthropic.ts
2
+ import Anthropic from "@anthropic-ai/sdk";
3
/**
 * AIProvider backed by the Anthropic Messages API.
 *
 * Translates the provider-neutral message/tool shapes into the SDK's
 * snake_case request format and normalises the response back.
 */
var AnthropicProvider = class {
  name = "anthropic";
  isLocal = false;
  client;
  model;
  /**
   * @param {string} apiKey Anthropic API key.
   * @param {string} [model] Model id; defaults to "claude-opus-4-7".
   */
  constructor(apiKey, model = "claude-opus-4-7") {
    this.client = new Anthropic({ apiKey });
    this.model = model;
  }
  /**
   * Send a conversation (plus optional tools) and normalise the reply.
   * The Anthropic API takes the system prompt as a separate top-level
   * field, so system messages are pulled out of the transcript here.
   */
  async chat(args) {
    const sdkMessages = args.messages.filter((m) => m.role !== "system").map((m) => ({ role: m.role, content: m.content }));
    // FIX: join ALL system messages — previously only the first was kept
    // and any additional system entries were silently dropped.
    const systemParts = args.messages.filter((m) => m.role === "system").map((m) => m.content);
    const systemMsg = systemParts.length > 0 ? systemParts.join("\n\n") : void 0;
    const sdkTools = args.tools?.map((t) => ({
      name: t.name,
      description: t.description,
      input_schema: t.inputSchema
    }));
    const response = await this.client.messages.create(
      {
        model: this.model,
        max_tokens: args.maxTokens ?? 1024,
        ...systemMsg ? { system: systemMsg } : {},
        messages: sdkMessages,
        ...sdkTools?.length ? { tools: sdkTools } : {}
      },
      { signal: args.signal }
    );
    // Response content is a list of typed blocks; text and tool_use blocks
    // are extracted separately.
    const text = response.content.filter((b) => b.type === "text").map((b) => b.text).join("");
    const toolCalls = response.content.filter((b) => b.type === "tool_use").map((b) => ({ id: b.id, name: b.name, input: b.input }));
    const stopReason = response.stop_reason === "tool_use" ? "tool_use" : response.stop_reason === "max_tokens" ? "max_tokens" : "end";
    return {
      text,
      toolCalls,
      stopReason,
      usage: {
        inputTokens: response.usage.input_tokens,
        outputTokens: response.usage.output_tokens
      }
    };
  }
};
44
+
45
+ // src/providers/gemini.ts
46
+ import { GoogleGenAI } from "@google/genai";
47
/**
 * AIProvider backed by Google Gemini via @google/genai.
 *
 * Gemini takes prior turns as "history", the final message separately,
 * and the system prompt as systemInstruction.
 */
var GeminiProvider = class {
  name = "gemini";
  isLocal = false;
  client;
  model;
  /**
   * @param {string} apiKey Google AI Studio API key.
   * @param {string} [model] Model id; defaults to "gemini-2.5-pro".
   */
  constructor(apiKey, model = "gemini-2.5-pro") {
    this.client = new GoogleGenAI({ apiKey });
    this.model = model;
  }
  /** Send a conversation (plus optional tools) and normalise the reply. */
  async chat(args) {
    // FIX: join ALL system messages — previously only the first was used
    // and any additional system entries were silently dropped.
    const systemParts = args.messages.filter((m) => m.role === "system").map((m) => m.content);
    const systemMsg = systemParts.length > 0 ? systemParts.join("\n\n") : void 0;
    const conversation = args.messages.filter((m) => m.role !== "system");
    // All turns except the last become history; 'assistant' maps to 'model'.
    const history = conversation.slice(0, -1).map((m) => ({
      role: m.role === "assistant" ? "model" : "user",
      parts: [{ text: m.content }]
    }));
    const lastMsg = conversation.at(-1);
    const sdkTools = args.tools?.length ? [
      {
        functionDeclarations: args.tools.map((t) => ({
          name: t.name,
          description: t.description,
          parameters: t.inputSchema
        }))
      }
    ] : void 0;
    const chat = this.client.chats.create({
      model: this.model,
      ...systemMsg ? { systemInstruction: systemMsg } : {},
      history,
      config: {
        maxOutputTokens: args.maxTokens ?? 1024,
        ...sdkTools ? { tools: sdkTools } : {}
      }
    });
    const response = await chat.sendMessage({ message: lastMsg?.content ?? "" });
    const text = response.text ?? "";
    // Gemini does not return tool-call ids; synthesise stable per-response ids.
    const toolCalls = (response.functionCalls ?? []).map((fc, i) => ({
      id: `gemini-tc-${i}`,
      name: fc.name ?? "",
      input: fc.args ?? {}
    }));
    const stopReason = toolCalls.length > 0 ? "tool_use" : response.candidates?.[0]?.finishReason === "MAX_TOKENS" ? "max_tokens" : "end";
    const usage = response.usageMetadata;
    return {
      text,
      toolCalls,
      stopReason,
      usage: {
        inputTokens: usage?.promptTokenCount ?? 0,
        outputTokens: usage?.candidatesTokenCount ?? 0
      }
    };
  }
};
101
+
102
+ // src/providers/openai-compatible.ts
103
+ import OpenAI from "openai";
104
/**
 * Shared implementation for every backend that speaks the OpenAI
 * chat-completions protocol (OpenAI, Groq, Mistral, Ollama, OpenRouter).
 * Subclasses only supply credentials, model and base URL.
 */
var OpenAICompatibleProvider = class {
  name;
  isLocal;
  client;
  model;
  /** @param {OpenAICompatibleOptions} options Provider wiring. */
  constructor(options) {
    this.name = options.providerName;
    this.isLocal = options.isLocal ?? false;
    this.model = options.model;
    const clientConfig = { apiKey: options.apiKey };
    if (options.baseURL) {
      clientConfig.baseURL = options.baseURL;
    }
    this.client = new OpenAI(clientConfig);
  }
  /** Run one chat-completion round trip and normalise the result. */
  async chat(args) {
    const request = {
      model: this.model,
      max_tokens: args.maxTokens ?? 1024,
      messages: args.messages.map(({ role, content }) => ({ role, content }))
    };
    if (args.tools?.length) {
      request.tools = args.tools.map((tool) => ({
        type: "function",
        function: {
          name: tool.name,
          description: tool.description,
          parameters: tool.inputSchema
        }
      }));
    }
    const response = await this.client.chat.completions.create(request, { signal: args.signal });
    const choice = response.choices[0];
    const message = choice?.message;
    const calls = (message?.tool_calls ?? []).map((call) => ({
      id: call.id,
      name: call.function.name,
      input: JSON.parse(call.function.arguments)
    }));
    let stopReason = "end";
    if (choice?.finish_reason === "tool_calls") {
      stopReason = "tool_use";
    } else if (choice?.finish_reason === "length") {
      stopReason = "max_tokens";
    }
    return {
      text: message?.content ?? "",
      toolCalls: calls,
      stopReason,
      usage: {
        inputTokens: response.usage?.prompt_tokens ?? 0,
        outputTokens: response.usage?.completion_tokens ?? 0
      }
    };
  }
};
160
+
161
// src/providers/groq.ts
/** Groq — OpenAI-compatible endpoint at api.groq.com. */
var GroqProvider = class extends OpenAICompatibleProvider {
  constructor(apiKey, model = "llama-3.3-70b-versatile") {
    super({ providerName: "groq", apiKey, model, baseURL: "https://api.groq.com/openai/v1" });
  }
};

// src/providers/mistral.ts
/** Mistral — OpenAI-compatible endpoint at api.mistral.ai. */
var MistralProvider = class extends OpenAICompatibleProvider {
  constructor(apiKey, model = "mistral-large-latest") {
    super({ providerName: "mistral", apiKey, model, baseURL: "https://api.mistral.ai/v1" });
  }
};

// src/providers/ollama.ts
/** Ollama — local daemon exposing an OpenAI-compatible /v1 API. */
var OllamaProvider = class extends OpenAICompatibleProvider {
  constructor(model = "llama3.2", endpoint = "http://localhost:11434") {
    super({
      providerName: "ollama",
      // Ollama doesn't require a real key
      apiKey: "ollama",
      model,
      baseURL: `${endpoint}/v1`,
      isLocal: true
    });
  }
};

// src/providers/openai.ts
/** OpenAI proper — relies on the SDK's default base URL. */
var OpenAIProvider = class extends OpenAICompatibleProvider {
  constructor(apiKey, model = "gpt-4o") {
    super({ providerName: "openai", apiKey, model });
  }
};

// src/providers/openrouter.ts
/**
 * OpenRouter — aggregates 200+ models behind a single API key.
 * Default model: google/gemini-2.5-pro (strong, cost-effective).
 * Users can set any model slug from openrouter.ai/models.
 */
var OpenRouterProvider = class extends OpenAICompatibleProvider {
  constructor(apiKey, model = "google/gemini-2.5-pro") {
    super({ providerName: "openrouter", apiKey, model, baseURL: "https://openrouter.ai/api/v1" });
  }
};
217
+
218
// src/tools.ts
/**
 * Tool definitions advertised to the model. The name/description/
 * inputSchema shape follows the JSON-Schema function-calling convention
 * shared by all supported providers. Every tool is read-only.
 */
var TOOLS = {
  // Enumerate projects (and global caches) under a root path.
  listProjects: {
    name: "list_projects",
    description: "List all detected projects under a given path. Returns project type, path, last-modified date, and total cleanable size. Read-only.",
    inputSchema: {
      type: "object",
      properties: {
        root: { type: "string", description: "Absolute path to scan from" },
        maxDepth: { type: "number", description: "Max directory depth", default: 5 }
      },
      required: ["root"]
    }
  },
  // Deep-dive into one project: items, risk tiers, safety-check results.
  analyzeProject: {
    name: "analyze_project",
    description: "Get detailed analysis of a single project: all cleanable items, risk tiers, git status, last activity. Read-only.",
    inputSchema: {
      type: "object",
      properties: {
        path: { type: "string", description: "Absolute path to the project root" }
      },
      required: ["path"]
    }
  },
  // Dry-run: how much space cleaning the given item ids would free.
  estimateCleanup: {
    name: "estimate_cleanup",
    description: "Estimate space that would be freed by cleaning a set of items. Runs safety checks and returns what would be allowed vs skipped. Read-only, never deletes.",
    inputSchema: {
      type: "object",
      properties: {
        itemIds: { type: "array", items: { type: "string" } },
        includeRed: { type: "boolean", default: false }
      },
      required: ["itemIds"]
    }
  },
  // Report free/used/total space for a path's filesystem.
  getDiskUsage: {
    name: "get_disk_usage",
    description: "Get current disk usage on the user machine (free / used / total).",
    inputSchema: {
      type: "object",
      properties: {
        // no `required` list: path defaults to the user's home directory
        path: { type: "string", description: "Path to check (defaults to home)" }
      }
    }
  }
};
// Flat list in AITool shape, handed to AIProvider.chat().
var ALL_TOOLS = Object.values(TOOLS);
267
+
268
+ // src/tool-executor.ts
269
+ import {
270
+ AndroidDetector,
271
+ CocoaPodsDetector,
272
+ DockerDetector,
273
+ FlutterDetector,
274
+ IdeDetector,
275
+ NodeDetector,
276
+ PythonDetector,
277
+ RustDetector,
278
+ SafetyChecker,
279
+ Scanner,
280
+ XcodeDetector
281
+ } from "@lexmanh/shed-core";
282
+ import { execa } from "execa";
283
/**
 * Build a Scanner wired with every detector shipped by shed-core.
 * A fresh instance is created for each tool call, so no state is shared
 * between invocations.
 */
function makeScanner() {
  return new Scanner([
    new NodeDetector(),
    new PythonDetector(),
    new RustDetector(),
    new DockerDetector(),
    new XcodeDetector(),
    new FlutterDetector(),
    new AndroidDetector(),
    new CocoaPodsDetector(),
    new IdeDetector()
  ]);
}
296
/**
 * Resolve one AI tool call (by wire name) to a JSON-serialisable result.
 * Every branch is read-only: it scans, checks and reports — never deletes.
 *
 * @param name            Tool name as declared in TOOLS (e.g. "list_projects").
 * @param input           Parsed tool-call arguments.
 * @param allScannedItems Items from a prior scan; required by
 *                        "estimate_cleanup" to resolve item ids.
 * @returns Result object, or `{ error: string }` on failure/unknown tool.
 */
async function executeToolCall(name, input, allScannedItems) {
  switch (name) {
    case "list_projects": {
      const root = input.root ?? process.env.HOME ?? "/";
      const maxDepth = input.maxDepth ?? 5;
      const scanner = makeScanner();
      const ctx = { scanRoot: root, maxDepth };
      // Project scan and global-cache scan are independent — run in parallel.
      // NOTE(review): maxDepth is only passed to scanGlobal via ctx, not to
      // scan(root) — confirm against the shed-core Scanner API.
      const [projects, globalItems] = await Promise.all([
        scanner.scan(root),
        scanner.scanGlobal(ctx)
      ]);
      return {
        root,
        projectCount: projects.length,
        projects: projects.map((p) => ({
          root: p.root,
          type: p.type,
          name: p.name,
          lastModified: p.lastModified,
          cleanableItems: p.items.length,
          totalBytes: p.items.reduce((s, i) => s + i.sizeBytes, 0)
        })),
        globalItemCount: globalItems.length,
        globalTotalBytes: globalItems.reduce((s, i) => s + i.sizeBytes, 0)
      };
    }
    case "analyze_project": {
      const path = input.path;
      const scanner = makeScanner();
      // NOTE(review): ctx is built but never handed to scan() — maxDepth 3
      // may be unused; confirm against the shed-core Scanner API.
      const ctx = { scanRoot: path, maxDepth: 3 };
      const projects = await scanner.scan(path);
      // Prefer the project rooted exactly at `path`; otherwise first match.
      const project = projects.find((p) => p.root === path) ?? projects[0];
      if (!project) return { error: `No project detected at ${path}` };
      const checker = new SafetyChecker();
      // One safety check per item, in parallel; order matches project.items.
      const checkResults = await Promise.all(project.items.map((i) => checker.check(i)));
      return {
        root: project.root,
        type: project.type,
        name: project.name,
        hasGit: project.hasGit,
        lastModified: project.lastModified,
        items: project.items.map((item, idx) => ({
          id: item.id,
          path: item.path,
          risk: item.risk,
          sizeBytes: item.sizeBytes,
          description: item.description,
          safetyAllowed: checkResults[idx]?.allowed ?? false,
          safetyReasons: checkResults[idx]?.reasons ?? []
        })),
        // scanGlobal not relevant per-project
        // NOTE(review): returning ctx looks like debug leftover — confirm no
        // consumer relies on it before removing.
        ctx
      };
    }
    case "estimate_cleanup": {
      const itemIds = input.itemIds ?? [];
      // Item ids only make sense relative to a previous scan.
      if (!allScannedItems) {
        return { error: "No scan context available. Run list_projects first." };
      }
      const targets = allScannedItems.filter((i) => itemIds.includes(i.id));
      const checker = new SafetyChecker();
      // results[idx] corresponds to targets[idx].
      const results = await Promise.all(targets.map((i) => checker.check(i)));
      const allowed = targets.filter((_, idx) => results[idx]?.allowed);
      const blocked = targets.filter((_, idx) => !results[idx]?.allowed);
      return {
        totalRequested: targets.length,
        allowedCount: allowed.length,
        blockedCount: blocked.length,
        estimatedBytesFreed: allowed.reduce((s, i) => s + i.sizeBytes, 0),
        blocked: blocked.map((i) => ({
          id: i.id,
          path: i.path,
          // indexOf maps this blocked item back to its safety-check result
          reason: results[targets.indexOf(i)]?.reasons.find((r) => r.severity === "block")?.message
        }))
      };
    }
    case "get_disk_usage": {
      const checkPath = input.path ?? process.env.HOME ?? "/";
      try {
        // `df -k` reports 1024-byte blocks; row 1 is the target filesystem.
        const { stdout } = await execa("df", ["-k", checkPath]);
        const lines = stdout.trim().split("\n");
        const parts = lines[1]?.split(/\s+/) ?? [];
        const totalKb = Number.parseInt(parts[1] ?? "0", 10);
        const usedKb = Number.parseInt(parts[2] ?? "0", 10);
        const availKb = Number.parseInt(parts[3] ?? "0", 10);
        return {
          path: checkPath,
          totalBytes: totalKb * 1024,
          usedBytes: usedKb * 1024,
          freeBytes: availKb * 1024,
          usedPercent: totalKb > 0 ? Math.round(usedKb / totalKb * 100) : 0
        };
      } catch {
        // df unavailable (e.g. Windows) or path invalid — report, don't crash.
        return { error: "Could not read disk usage" };
      }
    }
    default:
      return { error: `Unknown tool: ${name}` };
  }
}
396
+
397
+ // src/explain-session.ts
398
// Soft and hard token budgets for one session (CLAUDE.md rule 8.4).
var TOKEN_WARN = 4e4;
var TOKEN_HARD_STOP = 1e5;
// Default system prompt; callers may override via ExplainSessionOptions.
var DEFAULT_SYSTEM = `You are Shed, an AI assistant helping developers reclaim disk space safely.
You have access to tools to scan projects and analyze disk usage. Use them to give accurate,
specific recommendations. Always explain WHY something is safe to delete and how to regenerate it.
Be concise. Format sizes in human-readable form (MB/GB). Never suggest deleting anything without
explaining the risk tier and how to recover.`;
/**
 * Orchestrates a single AI analysis session: privacy consent (remote
 * providers only) → agentic tool loop → final explanation text.
 */
var ExplainSession = class {
  options;
  constructor(options) {
    this.options = options;
  }
  /**
   * Run the session for one user question.
   * @returns Final text plus token totals; `aborted` is true when the user
   *   declined the privacy prompt, the token budget was exhausted, or the
   *   iteration cap was hit.
   */
  async run(userQuestion) {
    const { provider, scanRoot, scannedItems, onPrivacyPrompt, onTokenWarning } = this.options;
    // Local providers never send data off-machine — no consent needed.
    if (!provider.isLocal) {
      const preview = buildPrivacyPreview(provider.name, scanRoot, scannedItems);
      const consented = await onPrivacyPrompt(preview);
      if (!consented) {
        return { text: "", totalInputTokens: 0, totalOutputTokens: 0, aborted: true };
      }
    }
    const messages = [
      {
        role: "system",
        content: this.options.systemPrompt ?? DEFAULT_SYSTEM
      },
      { role: "user", content: userQuestion }
    ];
    let totalInput = 0;
    let totalOutput = 0;
    // FIX: previously onTokenWarning fired on EVERY iteration once past the
    // threshold; track it so the warning fires exactly once per session.
    let warned = false;
    // Agentic loop, capped at 10 round trips.
    for (let i = 0; i < 10; i++) {
      const response = await provider.chat({
        messages,
        tools: ALL_TOOLS,
        maxTokens: 2048
      });
      totalInput += response.usage.inputTokens;
      totalOutput += response.usage.outputTokens;
      const totalUsed = totalInput + totalOutput;
      if (totalUsed >= TOKEN_HARD_STOP) {
        return {
          text: response.text || "[Session stopped: token budget exceeded]",
          totalInputTokens: totalInput,
          totalOutputTokens: totalOutput,
          aborted: true
        };
      }
      if (totalUsed >= TOKEN_WARN && !warned) {
        warned = true;
        onTokenWarning?.(totalUsed);
      }
      // No tool use → the model is done; return its final answer.
      if (response.stopReason !== "tool_use" || response.toolCalls.length === 0) {
        return {
          text: response.text,
          totalInputTokens: totalInput,
          totalOutputTokens: totalOutput,
          aborted: false
        };
      }
      // Record the assistant turn, then feed each tool result back as a
      // user message (the normalised AIMessage shape has no tool role).
      messages.push({ role: "assistant", content: response.text || "[tool use]" });
      for (const tc of response.toolCalls) {
        const result = await executeToolCall(tc.name, tc.input, scannedItems);
        messages.push({
          role: "user",
          content: `Tool result for ${tc.name} (id: ${tc.id}):
${JSON.stringify(result, null, 2)}`
        });
      }
    }
    return {
      text: "[Session ended: max iterations reached]",
      totalInputTokens: totalInput,
      totalOutputTokens: totalOutput,
      aborted: true
    };
  }
};
474
/**
 * Assemble the privacy preview shown before any data leaves the machine:
 * what is sent, what is not, and a rough token estimate.
 */
function buildPrivacyPreview(providerName, scanRoot, items) {
  const count = items?.length ?? 0;
  const included = [`Scan root path: ${scanRoot}`];
  if (count > 0) {
    included.push(`${count} cleanable item paths (names only, no file contents)`);
  }
  included.push(
    "Project types detected (node, python, rust, etc.)",
    "Item sizes and risk tiers"
  );
  return {
    providerName,
    // This preview is only built for remote providers.
    isLocal: false,
    dataIncluded: included,
    dataExcluded: [
      "File contents",
      "Source code",
      "Environment variables",
      "API keys or secrets",
      "Git history"
    ],
    // Rough heuristic: fixed prompt overhead plus ~30 tokens per item path.
    estimatedTokens: 500 + count * 30
  };
}
495
+
496
+ // src/provider-factory.ts
497
// All provider names selectable via shed config.
var PROVIDER_NAMES = [
  "anthropic",
  "openai",
  "gemini",
  "groq",
  "mistral",
  "openrouter",
  "ollama"
];
// Default model per provider, used when the caller configures none.
var DEFAULT_MODELS = {
  anthropic: "claude-opus-4-7",
  openai: "gpt-4o",
  gemini: "gemini-2.5-pro",
  groq: "llama-3.3-70b-versatile",
  mistral: "mistral-large-latest",
  openrouter: "google/gemini-2.5-pro",
  ollama: "llama3.2"
};
// Keychain "service" name under which per-provider keys are stored.
var KEYTAR_SERVICE = "shed-ai";
516
/**
 * Look up the stored API key for a provider.
 * Tries the OS keychain first; when keytar cannot be loaded (e.g. the
 * native module is unavailable), falls back to SHED_<PROVIDER>_API_KEY.
 */
async function getStoredApiKey(provider) {
  const envVar = `SHED_${provider.toUpperCase()}_API_KEY`;
  try {
    const keytar = (await import("keytar")).default;
    return await keytar.getPassword(KEYTAR_SERVICE, provider);
  } catch {
    return process.env[envVar] ?? null;
  }
}
524
/**
 * Persist an API key in the OS keychain.
 * @throws Error when the keychain is unavailable, pointing the user at
 *   the env-var fallback instead.
 */
async function setStoredApiKey(provider, key) {
  try {
    const keytar = (await import("keytar")).default;
    await keytar.setPassword(KEYTAR_SERVICE, provider, key);
  } catch {
    throw new Error(
      `Could not store API key in keychain. Set SHED_${provider.toUpperCase()}_API_KEY env var instead.`
    );
  }
}
534
/**
 * Remove a provider's API key from the OS keychain.
 * Best-effort: failures (keychain unavailable, key absent) are ignored.
 */
async function deleteStoredApiKey(provider) {
  try {
    const keytar = (await import("keytar")).default;
    await keytar.deletePassword(KEYTAR_SERVICE, provider);
  } catch {
    // nothing to do — deletion is best-effort
  }
}
541
/**
 * Instantiate the AIProvider selected in options.
 *
 * Ollama needs no API key; every other provider resolves one from, in
 * order: explicit option, OS keychain, SHED_<PROVIDER>_API_KEY env var.
 *
 * @throws Error when no API key can be found, or when the provider name
 *   is unknown at runtime.
 */
async function createProvider(options) {
  const { provider, model } = options;
  if (provider === "ollama") {
    return new OllamaProvider(model ?? DEFAULT_MODELS.ollama, options.ollamaEndpoint);
  }
  const apiKey = options.apiKey ?? await getStoredApiKey(provider) ?? process.env[`SHED_${provider.toUpperCase()}_API_KEY`];
  if (!apiKey) {
    throw new Error(
      `No API key found for provider "${provider}". Run: shed config set-key ${provider}
Or set env var: SHED_${provider.toUpperCase()}_API_KEY`
    );
  }
  const m = model ?? DEFAULT_MODELS[provider];
  switch (provider) {
    case "anthropic":
      return new AnthropicProvider(apiKey, m);
    case "openai":
      return new OpenAIProvider(apiKey, m);
    case "gemini":
      return new GeminiProvider(apiKey, m);
    case "groq":
      return new GroqProvider(apiKey, m);
    case "mistral":
      return new MistralProvider(apiKey, m);
    case "openrouter":
      return new OpenRouterProvider(apiKey, m);
    default:
      // FIX: previously an unknown name fell off the switch and resolved to
      // `undefined`; fail loudly instead (callers may be plain JS at runtime).
      throw new Error(`Unsupported provider: ${provider}`);
  }
}
569
+ export {
570
+ ALL_TOOLS,
571
+ AnthropicProvider,
572
+ DEFAULT_MODELS,
573
+ ExplainSession,
574
+ GeminiProvider,
575
+ GroqProvider,
576
+ MistralProvider,
577
+ OllamaProvider,
578
+ OpenAICompatibleProvider,
579
+ OpenAIProvider,
580
+ OpenRouterProvider,
581
+ PROVIDER_NAMES,
582
+ TOOLS,
583
+ createProvider,
584
+ deleteStoredApiKey,
585
+ executeToolCall,
586
+ getStoredApiKey,
587
+ setStoredApiKey
588
+ };
package/package.json ADDED
@@ -0,0 +1,35 @@
1
+ {
2
+ "name": "@lexmanh/shed-agent",
3
+ "version": "0.2.0-beta.1",
4
+ "description": "AI provider abstraction for Shed — supports Anthropic, OpenAI, Gemini, Groq, Mistral, OpenRouter, and Ollama",
5
+ "type": "module",
6
+ "main": "./dist/index.js",
7
+ "types": "./dist/index.d.ts",
8
+ "files": [
9
+ "dist"
10
+ ],
11
+ "dependencies": {
12
+ "@anthropic-ai/sdk": "^0.32.0",
13
+ "@google/genai": "^1.50.1",
14
+ "execa": "^9.5.0",
15
+ "keytar": "^7.9.0",
16
+ "openai": "^4.77.0",
17
+ "@lexmanh/shed-core": "0.2.0-beta.1"
18
+ },
19
+ "devDependencies": {
20
+ "@types/node": "^22.10.0",
21
+ "tsup": "^8.3.0",
22
+ "typescript": "^5.7.0",
23
+ "vitest": "^2.1.0"
24
+ },
25
+ "publishConfig": {
26
+ "access": "public"
27
+ },
28
+ "scripts": {
29
+ "build": "tsup src/index.ts --format esm --dts --clean",
30
+ "dev": "tsup src/index.ts --format esm --dts --watch",
31
+ "test": "vitest run --passWithNoTests",
32
+ "test:watch": "vitest",
33
+ "typecheck": "tsc --noEmit"
34
+ }
35
+ }