@plures/runebook 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/ANALYSIS_LADDER.md +231 -0
- package/CHANGELOG.md +124 -0
- package/INTEGRATIONS.md +242 -0
- package/LICENSE +21 -0
- package/MEMORY.md +253 -0
- package/NIXOS.md +357 -0
- package/QUICKSTART.md +157 -0
- package/README.md +295 -0
- package/RELEASE.md +190 -0
- package/ValidationChecklist.md +598 -0
- package/docs/demo.md +338 -0
- package/docs/llm-integration.md +300 -0
- package/docs/parallel-execution-plan.md +160 -0
- package/flake.nix +228 -0
- package/integrations/README.md +242 -0
- package/integrations/demo-steps.sh +64 -0
- package/integrations/nvim-runebook.lua +140 -0
- package/integrations/tmux-status.sh +51 -0
- package/integrations/vim-runebook.vim +77 -0
- package/integrations/wezterm-status-simple.lua +48 -0
- package/integrations/wezterm-status.lua +76 -0
- package/nixos-module.nix +156 -0
- package/package.json +76 -0
- package/packages/design-dojo/index.js +4 -0
- package/packages/design-dojo/package.json +20 -0
- package/packages/design-dojo/tokens.css +69 -0
- package/playwright.config.ts +16 -0
- package/scripts/check-versions.cjs +62 -0
- package/scripts/demo.sh +220 -0
- package/shell.nix +31 -0
- package/src/app.html +13 -0
- package/src/cli/index.ts +1050 -0
- package/src/lib/agent/analysis-pipeline.ts +347 -0
- package/src/lib/agent/analysis-service.ts +171 -0
- package/src/lib/agent/analysis.ts +159 -0
- package/src/lib/agent/analyzers/heuristic.ts +289 -0
- package/src/lib/agent/analyzers/index.ts +7 -0
- package/src/lib/agent/analyzers/llm.ts +204 -0
- package/src/lib/agent/analyzers/local-search.ts +215 -0
- package/src/lib/agent/capture.ts +123 -0
- package/src/lib/agent/index.ts +244 -0
- package/src/lib/agent/integration.ts +81 -0
- package/src/lib/agent/llm/providers/base.ts +99 -0
- package/src/lib/agent/llm/providers/index.ts +60 -0
- package/src/lib/agent/llm/providers/mock.ts +67 -0
- package/src/lib/agent/llm/providers/ollama.ts +151 -0
- package/src/lib/agent/llm/providers/openai.ts +153 -0
- package/src/lib/agent/llm/sanitizer.ts +170 -0
- package/src/lib/agent/llm/types.ts +118 -0
- package/src/lib/agent/memory.ts +363 -0
- package/src/lib/agent/node-status.ts +56 -0
- package/src/lib/agent/node-suggestions.ts +64 -0
- package/src/lib/agent/status.ts +80 -0
- package/src/lib/agent/suggestions.ts +169 -0
- package/src/lib/components/Canvas.svelte +124 -0
- package/src/lib/components/ConnectionLine.svelte +46 -0
- package/src/lib/components/DisplayNode.svelte +167 -0
- package/src/lib/components/InputNode.svelte +158 -0
- package/src/lib/components/TerminalNode.svelte +237 -0
- package/src/lib/components/Toolbar.svelte +359 -0
- package/src/lib/components/TransformNode.svelte +327 -0
- package/src/lib/core/index.ts +31 -0
- package/src/lib/core/observer.ts +278 -0
- package/src/lib/core/redaction.ts +158 -0
- package/src/lib/core/shell-adapters/base.ts +325 -0
- package/src/lib/core/shell-adapters/bash.ts +110 -0
- package/src/lib/core/shell-adapters/index.ts +62 -0
- package/src/lib/core/shell-adapters/zsh.ts +105 -0
- package/src/lib/core/storage.ts +360 -0
- package/src/lib/core/types.ts +176 -0
- package/src/lib/design-dojo/Box.svelte +47 -0
- package/src/lib/design-dojo/Button.svelte +75 -0
- package/src/lib/design-dojo/Input.svelte +65 -0
- package/src/lib/design-dojo/List.svelte +38 -0
- package/src/lib/design-dojo/Select.svelte +48 -0
- package/src/lib/design-dojo/SplitPane.svelte +43 -0
- package/src/lib/design-dojo/StatusBar.svelte +61 -0
- package/src/lib/design-dojo/Table.svelte +47 -0
- package/src/lib/design-dojo/Text.svelte +36 -0
- package/src/lib/design-dojo/Toggle.svelte +48 -0
- package/src/lib/design-dojo/index.ts +10 -0
- package/src/lib/stores/canvas-praxis.ts +268 -0
- package/src/lib/stores/canvas.ts +58 -0
- package/src/lib/types/agent.ts +78 -0
- package/src/lib/types/canvas.ts +71 -0
- package/src/lib/utils/storage.ts +326 -0
- package/src/lib/utils/yaml-loader.ts +52 -0
- package/src/routes/+layout.svelte +5 -0
- package/src/routes/+layout.ts +5 -0
- package/src/routes/+page.svelte +32 -0
- package/src-tauri/Cargo.lock +5735 -0
- package/src-tauri/Cargo.toml +38 -0
- package/src-tauri/build.rs +3 -0
- package/src-tauri/capabilities/default.json +10 -0
- package/src-tauri/icons/128x128.png +0 -0
- package/src-tauri/icons/128x128@2x.png +0 -0
- package/src-tauri/icons/32x32.png +0 -0
- package/src-tauri/icons/Square107x107Logo.png +0 -0
- package/src-tauri/icons/Square142x142Logo.png +0 -0
- package/src-tauri/icons/Square150x150Logo.png +0 -0
- package/src-tauri/icons/Square284x284Logo.png +0 -0
- package/src-tauri/icons/Square30x30Logo.png +0 -0
- package/src-tauri/icons/Square310x310Logo.png +0 -0
- package/src-tauri/icons/Square44x44Logo.png +0 -0
- package/src-tauri/icons/Square71x71Logo.png +0 -0
- package/src-tauri/icons/Square89x89Logo.png +0 -0
- package/src-tauri/icons/StoreLogo.png +0 -0
- package/src-tauri/icons/icon.icns +0 -0
- package/src-tauri/icons/icon.ico +0 -0
- package/src-tauri/icons/icon.png +0 -0
- package/src-tauri/src/agents/agent1.rs +66 -0
- package/src-tauri/src/agents/agent2.rs +80 -0
- package/src-tauri/src/agents/agent3.rs +73 -0
- package/src-tauri/src/agents/agent4.rs +66 -0
- package/src-tauri/src/agents/agent5.rs +68 -0
- package/src-tauri/src/agents/agent6.rs +75 -0
- package/src-tauri/src/agents/base.rs +52 -0
- package/src-tauri/src/agents/mod.rs +17 -0
- package/src-tauri/src/core/coordination.rs +117 -0
- package/src-tauri/src/core/mod.rs +12 -0
- package/src-tauri/src/core/ownership.rs +61 -0
- package/src-tauri/src/core/types.rs +132 -0
- package/src-tauri/src/execution/mod.rs +5 -0
- package/src-tauri/src/execution/runner.rs +143 -0
- package/src-tauri/src/lib.rs +161 -0
- package/src-tauri/src/main.rs +6 -0
- package/src-tauri/src/memory/api.rs +422 -0
- package/src-tauri/src/memory/client.rs +156 -0
- package/src-tauri/src/memory/encryption.rs +79 -0
- package/src-tauri/src/memory/migration.rs +110 -0
- package/src-tauri/src/memory/mod.rs +28 -0
- package/src-tauri/src/memory/schema.rs +275 -0
- package/src-tauri/src/memory/tests.rs +192 -0
- package/src-tauri/src/orchestrator/coordinator.rs +232 -0
- package/src-tauri/src/orchestrator/mod.rs +13 -0
- package/src-tauri/src/orchestrator/planner.rs +304 -0
- package/src-tauri/tauri.conf.json +35 -0
- package/static/examples/date-time-example.yaml +147 -0
- package/static/examples/hello-world.yaml +74 -0
- package/static/examples/transform-example.yaml +157 -0
- package/static/favicon.png +0 -0
- package/static/svelte.svg +1 -0
- package/static/tauri.svg +6 -0
- package/static/vite.svg +1 -0
- package/svelte.config.js +18 -0
- package/tsconfig.json +19 -0
- package/vite.config.js +45 -0
- package/vitest.config.ts +21 -0
|
@@ -0,0 +1,99 @@
|
|
|
1
|
+
// Base LLM Provider
|
|
2
|
+
// Abstract base class for all LLM providers
|
|
3
|
+
|
|
4
|
+
import type { LLMProvider, MCPToolInput, MCPToolOutput, SanitizedContext } from '../types';
|
|
5
|
+
import { sanitizeContext, formatContextForReview } from '../sanitizer';
|
|
6
|
+
|
|
7
|
+
/**
|
|
8
|
+
* Base provider implementation
|
|
9
|
+
*/
|
|
10
|
+
export abstract class BaseLLMProvider implements LLMProvider {
|
|
11
|
+
abstract name: string;
|
|
12
|
+
protected requireUserReview: boolean = true;
|
|
13
|
+
protected cacheEnabled: boolean = false;
|
|
14
|
+
protected cache: Map<string, { output: MCPToolOutput; timestamp: number }> = new Map();
|
|
15
|
+
protected cacheTtl: number = 3600; // 1 hour default
|
|
16
|
+
|
|
17
|
+
constructor(requireUserReview: boolean = true, cacheEnabled: boolean = false, cacheTtl: number = 3600) {
|
|
18
|
+
this.requireUserReview = requireUserReview;
|
|
19
|
+
this.cacheEnabled = cacheEnabled;
|
|
20
|
+
this.cacheTtl = cacheTtl;
|
|
21
|
+
}
|
|
22
|
+
|
|
23
|
+
abstract isAvailable(): Promise<boolean>;
|
|
24
|
+
abstract callLLM(input: MCPToolInput, sanitized: SanitizedContext): Promise<MCPToolOutput>;
|
|
25
|
+
|
|
26
|
+
/**
|
|
27
|
+
* Sanitize context before sending
|
|
28
|
+
*/
|
|
29
|
+
async sanitizeContext(context: MCPToolInput['contextWindow']): Promise<SanitizedContext> {
|
|
30
|
+
return sanitizeContext(context);
|
|
31
|
+
}
|
|
32
|
+
|
|
33
|
+
/**
|
|
34
|
+
* Analyze with safety checks
|
|
35
|
+
*/
|
|
36
|
+
async analyze(input: MCPToolInput): Promise<MCPToolOutput> {
|
|
37
|
+
// Sanitize context
|
|
38
|
+
const sanitized = await this.sanitizeContext(input.contextWindow);
|
|
39
|
+
|
|
40
|
+
// Check cache if enabled
|
|
41
|
+
if (this.cacheEnabled) {
|
|
42
|
+
const cacheKey = this.getCacheKey(input, sanitized);
|
|
43
|
+
const cached = this.cache.get(cacheKey);
|
|
44
|
+
if (cached && (Date.now() - cached.timestamp) < this.cacheTtl * 1000) {
|
|
45
|
+
return cached.output;
|
|
46
|
+
}
|
|
47
|
+
}
|
|
48
|
+
|
|
49
|
+
// User review (if required)
|
|
50
|
+
if (this.requireUserReview) {
|
|
51
|
+
const reviewText = formatContextForReview(sanitized);
|
|
52
|
+
// In CLI mode, we'll log this and wait for confirmation
|
|
53
|
+
// In GUI mode, this would show a dialog
|
|
54
|
+
console.log(reviewText);
|
|
55
|
+
console.log('\n⚠️ Context will be sent to LLM. Review above and confirm.');
|
|
56
|
+
// For now, we'll proceed (in real implementation, this would wait for user input)
|
|
57
|
+
}
|
|
58
|
+
|
|
59
|
+
// Call LLM with sanitized context
|
|
60
|
+
const sanitizedInput: MCPToolInput = {
|
|
61
|
+
...input,
|
|
62
|
+
contextWindow: sanitized.sanitized,
|
|
63
|
+
};
|
|
64
|
+
|
|
65
|
+
const output = await this.callLLM(sanitizedInput, sanitized);
|
|
66
|
+
|
|
67
|
+
// Cache result if enabled
|
|
68
|
+
if (this.cacheEnabled) {
|
|
69
|
+
const cacheKey = this.getCacheKey(input, sanitized);
|
|
70
|
+
this.cache.set(cacheKey, {
|
|
71
|
+
output,
|
|
72
|
+
timestamp: Date.now(),
|
|
73
|
+
});
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
return output;
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
/**
|
|
80
|
+
* Generate cache key from input
|
|
81
|
+
*/
|
|
82
|
+
protected getCacheKey(input: MCPToolInput, sanitized: SanitizedContext): string {
|
|
83
|
+
const key = JSON.stringify({
|
|
84
|
+
command: sanitized.sanitized.command,
|
|
85
|
+
args: sanitized.sanitized.args,
|
|
86
|
+
exitCode: sanitized.sanitized.exitCode,
|
|
87
|
+
stderr: sanitized.sanitized.stderr.substring(0, 500), // First 500 chars
|
|
88
|
+
});
|
|
89
|
+
return Buffer.from(key).toString('base64');
|
|
90
|
+
}
|
|
91
|
+
|
|
92
|
+
/**
|
|
93
|
+
* Clear cache
|
|
94
|
+
*/
|
|
95
|
+
clearCache(): void {
|
|
96
|
+
this.cache.clear();
|
|
97
|
+
}
|
|
98
|
+
}
|
|
99
|
+
|
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
// LLM Provider Factory
|
|
2
|
+
// Creates appropriate provider based on config
|
|
3
|
+
|
|
4
|
+
import type { LLMProvider, LLMProviderConfig } from '../types';
|
|
5
|
+
import { OllamaProvider } from './ollama';
|
|
6
|
+
import { OpenAIProvider } from './openai';
|
|
7
|
+
import { MockProvider } from './mock';
|
|
8
|
+
|
|
9
|
+
export { MockProvider, OllamaProvider, OpenAIProvider };
|
|
10
|
+
|
|
11
|
+
/**
|
|
12
|
+
* Create LLM provider from config
|
|
13
|
+
*/
|
|
14
|
+
export function createLLMProvider(config: LLMProviderConfig): LLMProvider | null {
|
|
15
|
+
if (!config.enabled) {
|
|
16
|
+
return null;
|
|
17
|
+
}
|
|
18
|
+
|
|
19
|
+
switch (config.type) {
|
|
20
|
+
case 'ollama':
|
|
21
|
+
return new OllamaProvider(config);
|
|
22
|
+
|
|
23
|
+
case 'openai':
|
|
24
|
+
try {
|
|
25
|
+
return new OpenAIProvider(config);
|
|
26
|
+
} catch (error) {
|
|
27
|
+
console.error('Failed to create OpenAI provider:', error);
|
|
28
|
+
return null;
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
case 'mock':
|
|
32
|
+
return new MockProvider(config);
|
|
33
|
+
|
|
34
|
+
case 'mcp':
|
|
35
|
+
// TODO: Implement MCP provider
|
|
36
|
+
console.warn('MCP provider not yet implemented');
|
|
37
|
+
return null;
|
|
38
|
+
|
|
39
|
+
default:
|
|
40
|
+
console.warn(`Unknown LLM provider type: ${config.type}`);
|
|
41
|
+
return null;
|
|
42
|
+
}
|
|
43
|
+
}
|
|
44
|
+
|
|
45
|
+
/**
|
|
46
|
+
* Check if a provider is available
|
|
47
|
+
*/
|
|
48
|
+
export async function isProviderAvailable(config: LLMProviderConfig): Promise<boolean> {
|
|
49
|
+
if (!config.enabled) {
|
|
50
|
+
return false;
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
const provider = createLLMProvider(config);
|
|
54
|
+
if (!provider) {
|
|
55
|
+
return false;
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
return await provider.isAvailable();
|
|
59
|
+
}
|
|
60
|
+
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
// Mock Provider for Testing
|
|
2
|
+
// Returns deterministic responses for testing
|
|
3
|
+
|
|
4
|
+
import { BaseLLMProvider } from './base';
|
|
5
|
+
import type { LLMProviderConfig, MCPToolInput, MCPToolOutput } from '../types';
|
|
6
|
+
|
|
7
|
+
export class MockProvider extends BaseLLMProvider {
|
|
8
|
+
name = 'mock';
|
|
9
|
+
private responses: Map<string, MCPToolOutput> = new Map();
|
|
10
|
+
|
|
11
|
+
constructor(config: LLMProviderConfig) {
|
|
12
|
+
super(
|
|
13
|
+
config.safety?.requireUserReview ?? false, // Mock doesn't need review
|
|
14
|
+
config.safety?.cacheEnabled ?? false,
|
|
15
|
+
config.safety?.cacheTtl ?? 3600
|
|
16
|
+
);
|
|
17
|
+
}
|
|
18
|
+
|
|
19
|
+
async isAvailable(): Promise<boolean> {
|
|
20
|
+
return true; // Mock is always available
|
|
21
|
+
}
|
|
22
|
+
|
|
23
|
+
/**
|
|
24
|
+
* Set a mock response for a specific input
|
|
25
|
+
*/
|
|
26
|
+
setMockResponse(input: MCPToolInput, output: MCPToolOutput): void {
|
|
27
|
+
const key = this.getInputKey(input);
|
|
28
|
+
this.responses.set(key, output);
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
/**
|
|
32
|
+
* Clear all mock responses
|
|
33
|
+
*/
|
|
34
|
+
clearMockResponses(): void {
|
|
35
|
+
this.responses.clear();
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
async callLLM(input: MCPToolInput, _sanitized: any): Promise<MCPToolOutput> {
|
|
39
|
+
const key = this.getInputKey(input);
|
|
40
|
+
|
|
41
|
+
// Check if we have a preset response
|
|
42
|
+
if (this.responses.has(key)) {
|
|
43
|
+
return this.responses.get(key)!;
|
|
44
|
+
}
|
|
45
|
+
|
|
46
|
+
// Default mock response
|
|
47
|
+
return {
|
|
48
|
+
suggestions: [{
|
|
49
|
+
title: 'Mock Suggestion',
|
|
50
|
+
description: `This is a mock suggestion for command: ${input.contextWindow.command}`,
|
|
51
|
+
actionableSnippet: `# Mock fix for ${input.contextWindow.command}`,
|
|
52
|
+
confidence: 0.7,
|
|
53
|
+
type: 'tip',
|
|
54
|
+
priority: 'medium',
|
|
55
|
+
}],
|
|
56
|
+
provenance: {
|
|
57
|
+
provider: 'mock',
|
|
58
|
+
timestamp: Date.now(),
|
|
59
|
+
},
|
|
60
|
+
};
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
private getInputKey(input: MCPToolInput): string {
|
|
64
|
+
return `${input.contextWindow.command}_${input.contextWindow.exitCode}_${input.contextWindow.stderr.substring(0, 100)}`;
|
|
65
|
+
}
|
|
66
|
+
}
|
|
67
|
+
|
|
@@ -0,0 +1,151 @@
|
|
|
1
|
+
// Ollama Provider
|
|
2
|
+
// Local model support via Ollama API
|
|
3
|
+
|
|
4
|
+
import { BaseLLMProvider } from './base';
|
|
5
|
+
import type { LLMProviderConfig, MCPToolInput, MCPToolOutput } from '../types';
|
|
6
|
+
|
|
7
|
+
export class OllamaProvider extends BaseLLMProvider {
|
|
8
|
+
name = 'ollama';
|
|
9
|
+
private baseUrl: string;
|
|
10
|
+
private model: string;
|
|
11
|
+
|
|
12
|
+
constructor(config: LLMProviderConfig) {
|
|
13
|
+
super(
|
|
14
|
+
config.safety?.requireUserReview ?? true,
|
|
15
|
+
config.safety?.cacheEnabled ?? false,
|
|
16
|
+
config.safety?.cacheTtl ?? 3600
|
|
17
|
+
);
|
|
18
|
+
this.baseUrl = config.ollama?.baseUrl || 'http://localhost:11434';
|
|
19
|
+
this.model = config.ollama?.model || 'llama3.2';
|
|
20
|
+
}
|
|
21
|
+
|
|
22
|
+
async isAvailable(): Promise<boolean> {
|
|
23
|
+
try {
|
|
24
|
+
const response = await fetch(`${this.baseUrl}/api/tags`, {
|
|
25
|
+
method: 'GET',
|
|
26
|
+
headers: { 'Content-Type': 'application/json' },
|
|
27
|
+
});
|
|
28
|
+
return response.ok;
|
|
29
|
+
} catch {
|
|
30
|
+
return false;
|
|
31
|
+
}
|
|
32
|
+
}
|
|
33
|
+
|
|
34
|
+
async callLLM(input: MCPToolInput, _sanitized: any): Promise<MCPToolOutput> {
|
|
35
|
+
// Build prompt
|
|
36
|
+
const prompt = this.buildPrompt(input);
|
|
37
|
+
|
|
38
|
+
// Call Ollama API
|
|
39
|
+
const response = await fetch(`${this.baseUrl}/api/generate`, {
|
|
40
|
+
method: 'POST',
|
|
41
|
+
headers: { 'Content-Type': 'application/json' },
|
|
42
|
+
body: JSON.stringify({
|
|
43
|
+
model: this.model,
|
|
44
|
+
prompt,
|
|
45
|
+
stream: false,
|
|
46
|
+
options: {
|
|
47
|
+
temperature: 0.7,
|
|
48
|
+
top_p: 0.9,
|
|
49
|
+
},
|
|
50
|
+
}),
|
|
51
|
+
});
|
|
52
|
+
|
|
53
|
+
if (!response.ok) {
|
|
54
|
+
throw new Error(`Ollama API error: ${response.statusText}`);
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
const data = await response.json();
|
|
58
|
+
const text = data.response || '';
|
|
59
|
+
|
|
60
|
+
// Parse response
|
|
61
|
+
return this.parseResponse(text, input);
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
private buildPrompt(input: MCPToolInput): string {
|
|
65
|
+
const { contextWindow, errorSummary, repoMetadata } = input;
|
|
66
|
+
|
|
67
|
+
return `You are a helpful assistant analyzing terminal command failures. Provide actionable suggestions.
|
|
68
|
+
|
|
69
|
+
Command: ${contextWindow.command} ${contextWindow.args.join(' ')}
|
|
70
|
+
Working Directory: ${contextWindow.cwd}
|
|
71
|
+
Exit Code: ${contextWindow.exitCode}
|
|
72
|
+
|
|
73
|
+
Error Output:
|
|
74
|
+
${contextWindow.stderr.substring(0, 2000)}
|
|
75
|
+
|
|
76
|
+
Standard Output:
|
|
77
|
+
${contextWindow.stdout.substring(0, 1000)}
|
|
78
|
+
|
|
79
|
+
Previous Commands:
|
|
80
|
+
${contextWindow.previousCommands.slice(-3).map(c => ` ${c.command} ${c.args.join(' ')} (exit: ${c.exitCode})`).join('\n')}
|
|
81
|
+
|
|
82
|
+
Repository Context:
|
|
83
|
+
${repoMetadata.type ? `Type: ${repoMetadata.type}` : 'Unknown'}
|
|
84
|
+
${repoMetadata.language ? `Language: ${repoMetadata.language}` : ''}
|
|
85
|
+
${repoMetadata.files && repoMetadata.files.length > 0 ? `Relevant files: ${repoMetadata.files.slice(0, 5).join(', ')}` : ''}
|
|
86
|
+
|
|
87
|
+
Provide 1-3 actionable suggestions in JSON format:
|
|
88
|
+
{
|
|
89
|
+
"suggestions": [
|
|
90
|
+
{
|
|
91
|
+
"title": "Short title",
|
|
92
|
+
"description": "Detailed explanation",
|
|
93
|
+
"actionableSnippet": "Code or command to fix",
|
|
94
|
+
"confidence": 0.0-1.0,
|
|
95
|
+
"type": "command|optimization|shortcut|warning|tip",
|
|
96
|
+
"priority": "low|medium|high"
|
|
97
|
+
}
|
|
98
|
+
]
|
|
99
|
+
}
|
|
100
|
+
|
|
101
|
+
Only return valid JSON, no other text.`;
|
|
102
|
+
}
|
|
103
|
+
|
|
104
|
+
private parseResponse(text: string, input: MCPToolInput): MCPToolOutput {
|
|
105
|
+
try {
|
|
106
|
+
// Try to extract JSON from response
|
|
107
|
+
const jsonMatch = text.match(/\{[\s\S]*\}/);
|
|
108
|
+
if (!jsonMatch) {
|
|
109
|
+
throw new Error('No JSON found in response');
|
|
110
|
+
}
|
|
111
|
+
|
|
112
|
+
const parsed = JSON.parse(jsonMatch[0]);
|
|
113
|
+
|
|
114
|
+
// Convert to MCPToolOutput format
|
|
115
|
+
const suggestions = (parsed.suggestions || []).map((s: any) => ({
|
|
116
|
+
title: s.title || 'Suggestion',
|
|
117
|
+
description: s.description || '',
|
|
118
|
+
actionableSnippet: s.actionableSnippet,
|
|
119
|
+
confidence: Math.max(0, Math.min(1, s.confidence || 0.5)),
|
|
120
|
+
type: s.type || 'tip',
|
|
121
|
+
priority: s.priority || 'medium',
|
|
122
|
+
}));
|
|
123
|
+
|
|
124
|
+
return {
|
|
125
|
+
suggestions,
|
|
126
|
+
provenance: {
|
|
127
|
+
provider: 'ollama',
|
|
128
|
+
model: this.model,
|
|
129
|
+
timestamp: Date.now(),
|
|
130
|
+
},
|
|
131
|
+
};
|
|
132
|
+
} catch (error) {
|
|
133
|
+
// Fallback: create a generic suggestion from the text
|
|
134
|
+
return {
|
|
135
|
+
suggestions: [{
|
|
136
|
+
title: 'LLM Analysis',
|
|
137
|
+
description: text.substring(0, 500),
|
|
138
|
+
confidence: 0.5,
|
|
139
|
+
type: 'tip',
|
|
140
|
+
priority: 'medium',
|
|
141
|
+
}],
|
|
142
|
+
provenance: {
|
|
143
|
+
provider: 'ollama',
|
|
144
|
+
model: this.model,
|
|
145
|
+
timestamp: Date.now(),
|
|
146
|
+
},
|
|
147
|
+
};
|
|
148
|
+
}
|
|
149
|
+
}
|
|
150
|
+
}
|
|
151
|
+
|
|
@@ -0,0 +1,153 @@
|
|
|
1
|
+
// OpenAI Provider
|
|
2
|
+
// OpenAI API support via API key
|
|
3
|
+
|
|
4
|
+
import { BaseLLMProvider } from './base';
|
|
5
|
+
import type { LLMProviderConfig, MCPToolInput, MCPToolOutput } from '../types';
|
|
6
|
+
|
|
7
|
+
export class OpenAIProvider extends BaseLLMProvider {
|
|
8
|
+
name = 'openai';
|
|
9
|
+
private apiKey: string;
|
|
10
|
+
private model: string;
|
|
11
|
+
private baseUrl: string;
|
|
12
|
+
|
|
13
|
+
constructor(config: LLMProviderConfig) {
|
|
14
|
+
super(
|
|
15
|
+
config.safety?.requireUserReview ?? true,
|
|
16
|
+
config.safety?.cacheEnabled ?? false,
|
|
17
|
+
config.safety?.cacheTtl ?? 3600
|
|
18
|
+
);
|
|
19
|
+
|
|
20
|
+
// Get API key from env var
|
|
21
|
+
this.apiKey = config.openai?.apiKey || process.env.OPENAI_API_KEY || '';
|
|
22
|
+
if (!this.apiKey) {
|
|
23
|
+
throw new Error('OpenAI API key not found. Set OPENAI_API_KEY environment variable.');
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
this.model = config.openai?.model || 'gpt-4o-mini';
|
|
27
|
+
this.baseUrl = config.openai?.baseUrl || 'https://api.openai.com/v1';
|
|
28
|
+
}
|
|
29
|
+
|
|
30
|
+
async isAvailable(): Promise<boolean> {
|
|
31
|
+
return this.apiKey.length > 0;
|
|
32
|
+
}
|
|
33
|
+
|
|
34
|
+
async callLLM(input: MCPToolInput, _sanitized: any): Promise<MCPToolOutput> {
|
|
35
|
+
// Build messages
|
|
36
|
+
const messages = this.buildMessages(input);
|
|
37
|
+
|
|
38
|
+
// Call OpenAI API
|
|
39
|
+
const response = await fetch(`${this.baseUrl}/chat/completions`, {
|
|
40
|
+
method: 'POST',
|
|
41
|
+
headers: {
|
|
42
|
+
'Content-Type': 'application/json',
|
|
43
|
+
'Authorization': `Bearer ${this.apiKey}`,
|
|
44
|
+
},
|
|
45
|
+
body: JSON.stringify({
|
|
46
|
+
model: this.model,
|
|
47
|
+
messages,
|
|
48
|
+
temperature: 0.7,
|
|
49
|
+
response_format: { type: 'json_object' },
|
|
50
|
+
}),
|
|
51
|
+
});
|
|
52
|
+
|
|
53
|
+
if (!response.ok) {
|
|
54
|
+
const error = await response.text();
|
|
55
|
+
throw new Error(`OpenAI API error: ${response.statusText} - ${error}`);
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
const data = await response.json();
|
|
59
|
+
const text = data.choices[0]?.message?.content || '';
|
|
60
|
+
|
|
61
|
+
// Parse response
|
|
62
|
+
return this.parseResponse(text, input, data.usage);
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
private buildMessages(input: MCPToolInput): Array<{ role: string; content: string }> {
|
|
66
|
+
const { contextWindow, errorSummary, repoMetadata } = input;
|
|
67
|
+
|
|
68
|
+
const systemPrompt = `You are a helpful assistant analyzing terminal command failures. Provide actionable suggestions in JSON format.`;
|
|
69
|
+
|
|
70
|
+
const userPrompt = `Analyze this command failure:
|
|
71
|
+
|
|
72
|
+
Command: ${contextWindow.command} ${contextWindow.args.join(' ')}
|
|
73
|
+
Working Directory: ${contextWindow.cwd}
|
|
74
|
+
Exit Code: ${contextWindow.exitCode}
|
|
75
|
+
|
|
76
|
+
Error Output:
|
|
77
|
+
${contextWindow.stderr.substring(0, 2000)}
|
|
78
|
+
|
|
79
|
+
Standard Output:
|
|
80
|
+
${contextWindow.stdout.substring(0, 1000)}
|
|
81
|
+
|
|
82
|
+
Previous Commands:
|
|
83
|
+
${contextWindow.previousCommands.slice(-3).map(c => ` ${c.command} ${c.args.join(' ')} (exit: ${c.exitCode})`).join('\n')}
|
|
84
|
+
|
|
85
|
+
Repository Context:
|
|
86
|
+
${repoMetadata.type ? `Type: ${repoMetadata.type}` : 'Unknown'}
|
|
87
|
+
${repoMetadata.language ? `Language: ${repoMetadata.language}` : ''}
|
|
88
|
+
${repoMetadata.files && repoMetadata.files.length > 0 ? `Relevant files: ${repoMetadata.files.slice(0, 5).join(', ')}` : ''}
|
|
89
|
+
|
|
90
|
+
Provide 1-3 actionable suggestions in this JSON format:
|
|
91
|
+
{
|
|
92
|
+
"suggestions": [
|
|
93
|
+
{
|
|
94
|
+
"title": "Short title",
|
|
95
|
+
"description": "Detailed explanation",
|
|
96
|
+
"actionableSnippet": "Code or command to fix",
|
|
97
|
+
"confidence": 0.0-1.0,
|
|
98
|
+
"type": "command|optimization|shortcut|warning|tip",
|
|
99
|
+
"priority": "low|medium|high"
|
|
100
|
+
}
|
|
101
|
+
]
|
|
102
|
+
}`;
|
|
103
|
+
|
|
104
|
+
return [
|
|
105
|
+
{ role: 'system', content: systemPrompt },
|
|
106
|
+
{ role: 'user', content: userPrompt },
|
|
107
|
+
];
|
|
108
|
+
}
|
|
109
|
+
|
|
110
|
+
private parseResponse(text: string, input: MCPToolInput, usage?: any): MCPToolOutput {
|
|
111
|
+
try {
|
|
112
|
+
const parsed = JSON.parse(text);
|
|
113
|
+
|
|
114
|
+
// Convert to MCPToolOutput format
|
|
115
|
+
const suggestions = (parsed.suggestions || []).map((s: any) => ({
|
|
116
|
+
title: s.title || 'Suggestion',
|
|
117
|
+
description: s.description || '',
|
|
118
|
+
actionableSnippet: s.actionableSnippet,
|
|
119
|
+
confidence: Math.max(0, Math.min(1, s.confidence || 0.5)),
|
|
120
|
+
type: s.type || 'tip',
|
|
121
|
+
priority: s.priority || 'medium',
|
|
122
|
+
}));
|
|
123
|
+
|
|
124
|
+
return {
|
|
125
|
+
suggestions,
|
|
126
|
+
provenance: {
|
|
127
|
+
provider: 'openai',
|
|
128
|
+
model: this.model,
|
|
129
|
+
timestamp: Date.now(),
|
|
130
|
+
tokensUsed: usage?.total_tokens,
|
|
131
|
+
},
|
|
132
|
+
};
|
|
133
|
+
} catch (error) {
|
|
134
|
+
// Fallback: create a generic suggestion
|
|
135
|
+
return {
|
|
136
|
+
suggestions: [{
|
|
137
|
+
title: 'LLM Analysis',
|
|
138
|
+
description: text.substring(0, 500),
|
|
139
|
+
confidence: 0.5,
|
|
140
|
+
type: 'tip',
|
|
141
|
+
priority: 'medium',
|
|
142
|
+
}],
|
|
143
|
+
provenance: {
|
|
144
|
+
provider: 'openai',
|
|
145
|
+
model: this.model,
|
|
146
|
+
timestamp: Date.now(),
|
|
147
|
+
tokensUsed: usage?.total_tokens,
|
|
148
|
+
},
|
|
149
|
+
};
|
|
150
|
+
}
|
|
151
|
+
}
|
|
152
|
+
}
|
|
153
|
+
|
|
@@ -0,0 +1,170 @@
|
|
|
1
|
+
// Context Sanitization for LLM Safety
|
|
2
|
+
// Redacts secrets, tokens, and sensitive information before sending to LLM
|
|
3
|
+
|
|
4
|
+
import type { AnalysisContext, SanitizedContext } from './types';
|
|
5
|
+
|
|
6
|
+
/**
 * Patterns redacted from context before it is sent to an LLM.
 *
 * Order matters: patterns are applied in sequence, so broad matchers
 * (e.g. long alphanumeric runs) may consume provider-specific tokens
 * before the narrower patterns see them.
 */
const SECRET_PATTERNS = [
  // API keys and tokens
  /\b[A-Za-z0-9]{32,}\b/g, // Long alphanumeric strings (likely tokens)
  /\bghp_[A-Za-z0-9]{36,}\b/g, // GitHub personal access tokens
  /\bgho_[A-Za-z0-9]{36,}\b/g, // GitHub OAuth tokens
  /\bghu_[A-Za-z0-9]{36,}\b/g, // GitHub user-to-server tokens
  /\bghs_[A-Za-z0-9]{36,}\b/g, // GitHub server-to-server tokens
  /\bsk-[A-Za-z0-9]{32,}\b/g, // Stripe keys (also matches OpenAI-style sk- keys)
  /\bpk_[A-Za-z0-9]{32,}\b/g, // Stripe publishable keys
  /\bAIza[0-9A-Za-z_-]{35}\b/g, // Google API keys
  /\bAKIA[0-9A-Z]{16}\b/g, // AWS access keys
  /\b[A-Za-z0-9/+=]{40}\b/g, // Base64 encoded secrets (40+ chars)

  // Environment variables that might contain secrets (KEY=value assignments)
  /(?:password|passwd|pwd|secret|token|key|api_key|apikey|auth|credential)\s*=\s*['"]?([^'"\s]+)['"]?/gi,

  // Private keys (PEM blocks, optionally "RSA")
  /-----BEGIN\s+(?:RSA\s+)?PRIVATE\s+KEY-----[\s\S]*?-----END\s+(?:RSA\s+)?PRIVATE\s+KEY-----/gi,

  // JWT tokens (three base64url segments; header starts with "eyJ")
  /\beyJ[A-Za-z0-9_-]+\.[A-Za-z0-9_-]+\.[A-Za-z0-9_-]+\b/g,
];
|
31
|
+
|
|
32
|
+
/**
|
|
33
|
+
* Redact a string, replacing secrets with placeholders
|
|
34
|
+
*/
|
|
35
|
+
function redactString(text: string): { sanitized: string; redactions: Array<{ pattern: string; replaced: string }> } {
|
|
36
|
+
let sanitized = text;
|
|
37
|
+
const redactions: Array<{ pattern: string; replaced: string }> = [];
|
|
38
|
+
|
|
39
|
+
for (const pattern of SECRET_PATTERNS) {
|
|
40
|
+
const matches = text.matchAll(pattern);
|
|
41
|
+
for (const match of matches) {
|
|
42
|
+
if (match[0] && match.index !== undefined) {
|
|
43
|
+
const placeholder = `[REDACTED:${match[0].substring(0, 8)}...]`;
|
|
44
|
+
sanitized = sanitized.replace(match[0], placeholder);
|
|
45
|
+
redactions.push({
|
|
46
|
+
pattern: match[0],
|
|
47
|
+
replaced: placeholder,
|
|
48
|
+
});
|
|
49
|
+
}
|
|
50
|
+
}
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
return { sanitized, redactions };
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
/**
|
|
57
|
+
* Sanitize environment variables
|
|
58
|
+
*/
|
|
59
|
+
function sanitizeEnv(env: Record<string, string>): { sanitized: Record<string, string>; redactions: Array<{ pattern: string; replaced: string }> } {
|
|
60
|
+
const sanitized: Record<string, string> = {};
|
|
61
|
+
const redactions: Array<{ pattern: string; replaced: string }> = [];
|
|
62
|
+
|
|
63
|
+
for (const [key, value] of Object.entries(env)) {
|
|
64
|
+
const keyLower = key.toLowerCase();
|
|
65
|
+
// Redact common secret env vars
|
|
66
|
+
if (keyLower.includes('password') ||
|
|
67
|
+
keyLower.includes('secret') ||
|
|
68
|
+
keyLower.includes('token') ||
|
|
69
|
+
keyLower.includes('key') ||
|
|
70
|
+
keyLower.includes('credential') ||
|
|
71
|
+
keyLower.includes('api_key')) {
|
|
72
|
+
sanitized[key] = '[REDACTED]';
|
|
73
|
+
redactions.push({
|
|
74
|
+
pattern: `${key}=${value}`,
|
|
75
|
+
replaced: `${key}=[REDACTED]`,
|
|
76
|
+
});
|
|
77
|
+
} else {
|
|
78
|
+
// Still check value for secrets
|
|
79
|
+
const { sanitized: sanitizedValue, redactions: valueRedactions } = redactString(value);
|
|
80
|
+
sanitized[key] = sanitizedValue;
|
|
81
|
+
redactions.push(...valueRedactions.map(r => ({
|
|
82
|
+
pattern: r.pattern,
|
|
83
|
+
replaced: r.replaced,
|
|
84
|
+
})));
|
|
85
|
+
}
|
|
86
|
+
}
|
|
87
|
+
|
|
88
|
+
return { sanitized, redactions };
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
/**
|
|
92
|
+
* Sanitize analysis context
|
|
93
|
+
*/
|
|
94
|
+
export function sanitizeContext(context: AnalysisContext): SanitizedContext {
|
|
95
|
+
// Sanitize environment
|
|
96
|
+
const { sanitized: sanitizedEnv, redactions: envRedactions } = sanitizeEnv(context.env);
|
|
97
|
+
|
|
98
|
+
// Sanitize stdout and stderr
|
|
99
|
+
const { sanitized: sanitizedStdout, redactions: stdoutRedactions } = redactString(context.stdout);
|
|
100
|
+
const { sanitized: sanitizedStderr, redactions: stderrRedactions } = redactString(context.stderr);
|
|
101
|
+
|
|
102
|
+
// Sanitize command args (might contain secrets)
|
|
103
|
+
const sanitizedArgs = context.args.map(arg => {
|
|
104
|
+
const { sanitized } = redactString(arg);
|
|
105
|
+
return sanitized;
|
|
106
|
+
});
|
|
107
|
+
|
|
108
|
+
const sanitized: AnalysisContext = {
|
|
109
|
+
...context,
|
|
110
|
+
env: sanitizedEnv,
|
|
111
|
+
stdout: sanitizedStdout,
|
|
112
|
+
stderr: sanitizedStderr,
|
|
113
|
+
args: sanitizedArgs,
|
|
114
|
+
};
|
|
115
|
+
|
|
116
|
+
const redactions = [
|
|
117
|
+
...envRedactions.map(r => ({ ...r, type: 'env' as const })),
|
|
118
|
+
...stdoutRedactions.map(r => ({ ...r, type: 'stdout' as const })),
|
|
119
|
+
...stderrRedactions.map(r => ({ ...r, type: 'stderr' as const })),
|
|
120
|
+
];
|
|
121
|
+
|
|
122
|
+
return {
|
|
123
|
+
original: context,
|
|
124
|
+
sanitized,
|
|
125
|
+
redactions,
|
|
126
|
+
};
|
|
127
|
+
}
|
|
128
|
+
|
|
129
|
+
/**
|
|
130
|
+
* Format sanitized context for user review
|
|
131
|
+
*/
|
|
132
|
+
export function formatContextForReview(sanitized: SanitizedContext): string {
|
|
133
|
+
const lines: string[] = [];
|
|
134
|
+
|
|
135
|
+
lines.push('=== Context to be sent to LLM ===\n');
|
|
136
|
+
lines.push(`Command: ${sanitized.sanitized.command} ${sanitized.sanitized.args.join(' ')}`);
|
|
137
|
+
lines.push(`CWD: ${sanitized.sanitized.cwd}`);
|
|
138
|
+
lines.push(`Exit Code: ${sanitized.sanitized.exitCode}\n`);
|
|
139
|
+
|
|
140
|
+
if (sanitized.sanitized.stderr) {
|
|
141
|
+
lines.push(`Stderr (${sanitized.sanitized.stderr.length} chars):`);
|
|
142
|
+
lines.push(sanitized.sanitized.stderr.substring(0, 500));
|
|
143
|
+
if (sanitized.sanitized.stderr.length > 500) {
|
|
144
|
+
lines.push('... (truncated)');
|
|
145
|
+
}
|
|
146
|
+
lines.push('');
|
|
147
|
+
}
|
|
148
|
+
|
|
149
|
+
if (sanitized.sanitized.stdout) {
|
|
150
|
+
lines.push(`Stdout (${sanitized.sanitized.stdout.length} chars):`);
|
|
151
|
+
lines.push(sanitized.sanitized.stdout.substring(0, 500));
|
|
152
|
+
if (sanitized.sanitized.stdout.length > 500) {
|
|
153
|
+
lines.push('... (truncated)');
|
|
154
|
+
}
|
|
155
|
+
lines.push('');
|
|
156
|
+
}
|
|
157
|
+
|
|
158
|
+
if (sanitized.redactions.length > 0) {
|
|
159
|
+
lines.push(`\n=== Redactions Applied (${sanitized.redactions.length}) ===`);
|
|
160
|
+
for (const redaction of sanitized.redactions.slice(0, 10)) {
|
|
161
|
+
lines.push(` [${redaction.type}] ${redaction.pattern.substring(0, 50)}... → ${redaction.replaced}`);
|
|
162
|
+
}
|
|
163
|
+
if (sanitized.redactions.length > 10) {
|
|
164
|
+
lines.push(` ... and ${sanitized.redactions.length - 10} more`);
|
|
165
|
+
}
|
|
166
|
+
}
|
|
167
|
+
|
|
168
|
+
return lines.join('\n');
|
|
169
|
+
}
|
|
170
|
+
|