keystone-cli 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +136 -0
- package/logo.png +0 -0
- package/package.json +45 -0
- package/src/cli.ts +775 -0
- package/src/db/workflow-db.test.ts +99 -0
- package/src/db/workflow-db.ts +265 -0
- package/src/expression/evaluator.test.ts +247 -0
- package/src/expression/evaluator.ts +517 -0
- package/src/parser/agent-parser.test.ts +123 -0
- package/src/parser/agent-parser.ts +59 -0
- package/src/parser/config-schema.ts +54 -0
- package/src/parser/schema.ts +157 -0
- package/src/parser/workflow-parser.test.ts +212 -0
- package/src/parser/workflow-parser.ts +228 -0
- package/src/runner/llm-adapter.test.ts +329 -0
- package/src/runner/llm-adapter.ts +306 -0
- package/src/runner/llm-executor.test.ts +537 -0
- package/src/runner/llm-executor.ts +256 -0
- package/src/runner/mcp-client.test.ts +122 -0
- package/src/runner/mcp-client.ts +123 -0
- package/src/runner/mcp-manager.test.ts +143 -0
- package/src/runner/mcp-manager.ts +85 -0
- package/src/runner/mcp-server.test.ts +242 -0
- package/src/runner/mcp-server.ts +436 -0
- package/src/runner/retry.test.ts +52 -0
- package/src/runner/retry.ts +58 -0
- package/src/runner/shell-executor.test.ts +123 -0
- package/src/runner/shell-executor.ts +166 -0
- package/src/runner/step-executor.test.ts +465 -0
- package/src/runner/step-executor.ts +354 -0
- package/src/runner/timeout.test.ts +20 -0
- package/src/runner/timeout.ts +30 -0
- package/src/runner/tool-integration.test.ts +198 -0
- package/src/runner/workflow-runner.test.ts +358 -0
- package/src/runner/workflow-runner.ts +955 -0
- package/src/ui/dashboard.tsx +165 -0
- package/src/utils/auth-manager.test.ts +152 -0
- package/src/utils/auth-manager.ts +88 -0
- package/src/utils/config-loader.test.ts +52 -0
- package/src/utils/config-loader.ts +85 -0
- package/src/utils/mermaid.test.ts +51 -0
- package/src/utils/mermaid.ts +87 -0
- package/src/utils/redactor.test.ts +66 -0
- package/src/utils/redactor.ts +60 -0
- package/src/utils/workflow-registry.test.ts +108 -0
- package/src/utils/workflow-registry.ts +121 -0
|
@@ -0,0 +1,165 @@
|
|
|
1
|
+
import { Box, Newline, Text, render, useInput } from 'ink';
|
|
2
|
+
import React, { useState, useEffect, useCallback } from 'react';
|
|
3
|
+
import { WorkflowDb } from '../db/workflow-db.ts';
|
|
4
|
+
|
|
5
|
+
// Shape of one workflow-run row as returned by WorkflowDb.listRuns().
interface Run {
  id: string; // unique run id; only the first 8 chars are displayed
  workflow_name: string;
  status: string; // e.g. 'completed' | 'failed' | 'running' | 'paused' | 'pending' — TODO confirm against WorkflowDb
  started_at: string; // timestamp string; parsed with new Date() for display
}
|
|
11
|
+
|
|
12
|
+
// Full-screen ink component: shows a table of the 10 most recent workflow
// runs and polls the database for updates every 2 seconds. Press 'r' to
// refresh immediately; Ctrl+C (ink default) exits.
// NOTE(review): several emoji/box-drawing literals below appear
// mojibake-garbled in this rendering — verify against the original file.
const Dashboard = () => {
  const [runs, setRuns] = useState<Run[]>([]);
  const [loading, setLoading] = useState(true);

  // Pulls the latest runs. A fresh WorkflowDb is opened and closed on every
  // poll so no database handle is held between refreshes.
  const fetchData = useCallback(() => {
    const db = new WorkflowDb();
    try {
      const recentRuns = db.listRuns(10);
      setRuns(recentRuns);
    } catch (error) {
      console.error('Failed to fetch runs:', error);
    } finally {
      // Clear the loading state even on failure so the UI is not stuck.
      setLoading(false);
      db.close();
    }
  }, []);

  // Initial fetch plus a 2-second polling interval; cleared on unmount.
  useEffect(() => {
    fetchData();
    const interval = setInterval(fetchData, 2000);
    return () => clearInterval(interval);
  }, [fetchData]);

  // Keyboard shortcut: 'r' forces an immediate refresh.
  useInput((input) => {
    if (input === 'r') {
      fetchData();
    }
  });

  if (loading) {
    return (
      <Box>
        <Text color="cyan">Loading Keystone Dashboard...</Text>
      </Box>
    );
  }

  return (
    <Box flexDirection="column" padding={1}>
      {/* Title bar */}
      <Box marginBottom={1}>
        <Text bold color="magenta">
          šļø KEYSTONE DASHBOARD
        </Text>
      </Box>

      {/* Bordered table of recent runs */}
      <Box borderStyle="round" borderColor="gray" flexDirection="column" paddingX={1}>
        {/* Header row; fixed column widths must match the data rows below */}
        <Box marginBottom={0}>
          <Box width={12}>
            <Text bold color="cyan">
              ID
            </Text>
          </Box>
          <Box width={30}>
            <Text bold color="cyan">
              WORKFLOW
            </Text>
          </Box>
          <Box width={15}>
            <Text bold color="cyan">
              STATUS
            </Text>
          </Box>
          <Box>
            <Text bold color="cyan">
              STARTED
            </Text>
          </Box>
        </Box>

        {/* Horizontal separator */}
        <Box marginBottom={1}>
          <Text color="gray">{'ā'.repeat(80)}</Text>
        </Box>

        {runs.length === 0 ? (
          <Text italic color="gray">
            No workflow runs found.
          </Text>
        ) : (
          runs.map((run) => (
            <Box key={run.id} marginBottom={0}>
              <Box width={12}>
                <Text color="gray">{run.id.substring(0, 8)}</Text>
              </Box>
              <Box width={30}>
                <Text>{run.workflow_name}</Text>
              </Box>
              <Box width={15}>
                <Text color={getStatusColor(run.status)}>
                  {getStatusIcon(run.status)} {run.status.toUpperCase()}
                </Text>
              </Box>
              <Box>
                <Text color="gray">{new Date(run.started_at).toLocaleString()}</Text>
              </Box>
            </Box>
          ))
        )}
      </Box>

      {/* Footer: key bindings and refresh hint */}
      <Box marginTop={1} paddingX={1}>
        <Text color="gray">
          <Text bold color="white">
            {' '}
            r{' '}
          </Text>{' '}
          refresh ā¢
          <Text bold color="white">
            {' '}
            Ctrl+C{' '}
          </Text>{' '}
          exit ā¢<Text italic> Auto-refreshing every 2s</Text>
        </Text>
      </Box>
    </Box>
  );
};
|
|
128
|
+
|
|
129
|
+
const getStatusColor = (status: string) => {
|
|
130
|
+
switch (status.toLowerCase()) {
|
|
131
|
+
case 'completed':
|
|
132
|
+
return 'green';
|
|
133
|
+
case 'failed':
|
|
134
|
+
return 'red';
|
|
135
|
+
case 'running':
|
|
136
|
+
return 'yellow';
|
|
137
|
+
case 'paused':
|
|
138
|
+
return 'blue';
|
|
139
|
+
case 'pending':
|
|
140
|
+
return 'gray';
|
|
141
|
+
default:
|
|
142
|
+
return 'white';
|
|
143
|
+
}
|
|
144
|
+
};
|
|
145
|
+
|
|
146
|
+
// Maps a run status (case-insensitive) to a display glyph shown next to the
// status text.
// NOTE(review): the return literals below are mojibake-garbled in this
// rendering (UTF-8 emoji decoded as a single-byte charset; the 'completed'
// literal was even split across lines). Verify against the original file —
// they are presumably the ✅/❌/⏳/⏸️/⚪/🔹 family of glyphs.
const getStatusIcon = (status: string) => {
  switch (status.toLowerCase()) {
    case 'completed':
      return 'ā';
    case 'failed':
      return 'ā';
    case 'running':
      return 'ā³';
    case 'paused':
      return 'āøļø';
    case 'pending':
      return 'āŖ';
    default:
      return 'š¹';
  }
};
|
|
162
|
+
|
|
163
|
+
// CLI entry point: mounts the Dashboard component into the terminal via
// ink's render(). Control stays with ink until the user exits (Ctrl+C).
export const startDashboard = () => {
  render(<Dashboard />);
};
|
|
@@ -0,0 +1,152 @@
|
|
|
1
|
+
import { afterAll, beforeAll, beforeEach, describe, expect, it, mock, spyOn } from 'bun:test';
|
|
2
|
+
import * as fs from 'node:fs';
|
|
3
|
+
import { join } from 'node:path';
|
|
4
|
+
import { AuthManager } from './auth-manager.ts';
|
|
5
|
+
|
|
6
|
+
describe('AuthManager', () => {
  // Keep a handle on the real fetch so tests that stub it can restore it.
  const originalFetch = global.fetch;
  // Randomised per-process temp dir so concurrent test runs cannot collide.
  const TEMP_AUTH_DIR = join(
    process.cwd(),
    `temp-auth-test-${Math.random().toString(36).substring(7)}`
  );
  const TEMP_AUTH_FILE = join(TEMP_AUTH_DIR, 'auth.json');

  beforeAll(() => {
    if (!fs.existsSync(TEMP_AUTH_DIR)) {
      fs.mkdirSync(TEMP_AUTH_DIR, { recursive: true });
    }
  });

  afterAll(() => {
    // Restore the real fetch and remove the temp dir and its contents.
    global.fetch = originalFetch;
    if (fs.existsSync(TEMP_AUTH_DIR)) {
      fs.rmSync(TEMP_AUTH_DIR, { recursive: true, force: true });
    }
  });

  beforeEach(() => {
    // Start every test from a clean slate: no auth file, real fetch.
    if (fs.existsSync(TEMP_AUTH_FILE)) {
      try {
        fs.rmSync(TEMP_AUTH_FILE);
      } catch (e) {} // best-effort cleanup; a vanished file is fine
    }
    global.fetch = originalFetch;
    // Set environment variable for EACH test to be safe
    process.env.KEYSTONE_AUTH_PATH = TEMP_AUTH_FILE;
  });

  describe('load()', () => {
    it('should return empty object if auth file does not exist', () => {
      const data = AuthManager.load();
      expect(data).toEqual({});
    });

    it('should load and parse auth data if file exists', () => {
      fs.writeFileSync(TEMP_AUTH_FILE, JSON.stringify({ github_token: 'test-token' }));

      const data = AuthManager.load();
      expect(data).toEqual({ github_token: 'test-token' });
    });

    it('should return empty object if JSON parsing fails', () => {
      // Corrupt file must not throw — load() degrades to an empty object.
      fs.writeFileSync(TEMP_AUTH_FILE, 'invalid-json');

      const data = AuthManager.load();
      expect(data).toEqual({});
    });
  });

  describe('save()', () => {
    it('should save data merged with current data', () => {
      // save() must shallow-merge into existing content, not overwrite it.
      fs.writeFileSync(TEMP_AUTH_FILE, JSON.stringify({ github_token: 'old-token' }));

      AuthManager.save({ copilot_token: 'new-copilot-token' });

      const content = fs.readFileSync(TEMP_AUTH_FILE, 'utf8');
      expect(JSON.parse(content)).toEqual({
        github_token: 'old-token',
        copilot_token: 'new-copilot-token',
      });
    });
  });

  describe('getCopilotToken()', () => {
    it('should return undefined if no github_token', async () => {
      fs.writeFileSync(TEMP_AUTH_FILE, JSON.stringify({}));
      const token = await AuthManager.getCopilotToken();
      expect(token).toBeUndefined();
    });

    it('should return cached token if valid', async () => {
      // Expiry comfortably beyond the 5-minute freshness margin.
      const expires = Math.floor(Date.now() / 1000) + 1000;
      fs.writeFileSync(
        TEMP_AUTH_FILE,
        JSON.stringify({
          github_token: 'gh-token',
          copilot_token: 'cached-token',
          copilot_expires_at: expires,
        })
      );

      const token = await AuthManager.getCopilotToken();
      expect(token).toBe('cached-token');
    });

    it('should refresh token if expired', async () => {
      fs.writeFileSync(
        TEMP_AUTH_FILE,
        JSON.stringify({
          github_token: 'gh-token',
          copilot_token: 'expired-token',
          copilot_expires_at: Math.floor(Date.now() / 1000) - 1000,
        })
      );

      // Mock fetch
      const mockFetch = mock(() =>
        Promise.resolve(
          new Response(
            JSON.stringify({
              token: 'new-token',
              expires_at: Math.floor(Date.now() / 1000) + 3600,
            }),
            { status: 200 }
          )
        )
      );
      // @ts-ignore
      global.fetch = mockFetch;

      const token = await AuthManager.getCopilotToken();
      expect(token).toBe('new-token');
      expect(mockFetch).toHaveBeenCalled();
    });

    it('should return undefined and log error if refresh fails', async () => {
      // No cached copilot token, so a refresh attempt is forced.
      fs.writeFileSync(
        TEMP_AUTH_FILE,
        JSON.stringify({
          github_token: 'gh-token',
        })
      );

      // Mock fetch failure
      // @ts-ignore
      global.fetch = mock(() =>
        Promise.resolve(
          new Response('Unauthorized', {
            status: 401,
            statusText: 'Unauthorized',
          })
        )
      );

      // Silence the expected console.error while still asserting it fired.
      const consoleSpy = spyOn(console, 'error').mockImplementation(() => {});
      const token = await AuthManager.getCopilotToken();

      expect(token).toBeUndefined();
      expect(consoleSpy).toHaveBeenCalled();
      consoleSpy.mockRestore();
    });
  });
});
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
import { existsSync, mkdirSync, readFileSync, writeFileSync } from 'node:fs';
|
|
2
|
+
import { homedir } from 'node:os';
|
|
3
|
+
import { join } from 'node:path';
|
|
4
|
+
|
|
5
|
+
export interface AuthData {
|
|
6
|
+
github_token?: string;
|
|
7
|
+
copilot_token?: string;
|
|
8
|
+
copilot_expires_at?: number;
|
|
9
|
+
}
|
|
10
|
+
|
|
11
|
+
export const COPILOT_HEADERS = {
|
|
12
|
+
'Editor-Version': 'vscode/1.96.2',
|
|
13
|
+
'Editor-Plugin-Version': 'copilot-chat/0.23.1',
|
|
14
|
+
'User-Agent': 'GithubCopilot/1.255.0',
|
|
15
|
+
};
|
|
16
|
+
|
|
17
|
+
export class AuthManager {
|
|
18
|
+
private static getAuthPath(): string {
|
|
19
|
+
if (process.env.KEYSTONE_AUTH_PATH) {
|
|
20
|
+
return process.env.KEYSTONE_AUTH_PATH;
|
|
21
|
+
}
|
|
22
|
+
const dir = join(homedir(), '.keystone');
|
|
23
|
+
if (!existsSync(dir)) {
|
|
24
|
+
mkdirSync(dir, { recursive: true });
|
|
25
|
+
}
|
|
26
|
+
return join(dir, 'auth.json');
|
|
27
|
+
}
|
|
28
|
+
|
|
29
|
+
static load(): AuthData {
|
|
30
|
+
const path = AuthManager.getAuthPath();
|
|
31
|
+
if (existsSync(path)) {
|
|
32
|
+
try {
|
|
33
|
+
return JSON.parse(readFileSync(path, 'utf8'));
|
|
34
|
+
} catch {
|
|
35
|
+
return {};
|
|
36
|
+
}
|
|
37
|
+
}
|
|
38
|
+
return {};
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
static save(data: AuthData): void {
|
|
42
|
+
const path = AuthManager.getAuthPath();
|
|
43
|
+
const current = AuthManager.load();
|
|
44
|
+
writeFileSync(path, JSON.stringify({ ...current, ...data }, null, 2));
|
|
45
|
+
}
|
|
46
|
+
|
|
47
|
+
static async getCopilotToken(): Promise<string | undefined> {
|
|
48
|
+
const auth = AuthManager.load();
|
|
49
|
+
|
|
50
|
+
// Check if we have a valid cached token
|
|
51
|
+
if (
|
|
52
|
+
auth.copilot_token &&
|
|
53
|
+
auth.copilot_expires_at &&
|
|
54
|
+
auth.copilot_expires_at > Date.now() / 1000 + 300
|
|
55
|
+
) {
|
|
56
|
+
return auth.copilot_token;
|
|
57
|
+
}
|
|
58
|
+
|
|
59
|
+
if (!auth.github_token) {
|
|
60
|
+
return undefined;
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
// Exchange GitHub token for Copilot token
|
|
64
|
+
try {
|
|
65
|
+
const response = await fetch('https://api.github.com/copilot_internal/v2/token', {
|
|
66
|
+
headers: {
|
|
67
|
+
Authorization: `token ${auth.github_token}`,
|
|
68
|
+
...COPILOT_HEADERS,
|
|
69
|
+
},
|
|
70
|
+
});
|
|
71
|
+
|
|
72
|
+
if (!response.ok) {
|
|
73
|
+
throw new Error(`Failed to get Copilot token: ${response.statusText}`);
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
const data = (await response.json()) as { token: string; expires_at: number };
|
|
77
|
+
AuthManager.save({
|
|
78
|
+
copilot_token: data.token,
|
|
79
|
+
copilot_expires_at: data.expires_at,
|
|
80
|
+
});
|
|
81
|
+
|
|
82
|
+
return data.token;
|
|
83
|
+
} catch (error) {
|
|
84
|
+
console.error('Error refreshing Copilot token:', error);
|
|
85
|
+
return undefined;
|
|
86
|
+
}
|
|
87
|
+
}
|
|
88
|
+
}
|
|
@@ -0,0 +1,52 @@
|
|
|
1
|
+
import { describe, expect, it, afterEach } from 'bun:test';
|
|
2
|
+
import { ConfigLoader } from './config-loader';
|
|
3
|
+
import type { Config } from '../parser/config-schema';
|
|
4
|
+
|
|
5
|
+
describe('ConfigLoader', () => {
  afterEach(() => {
    // Drop the cached singleton so tests don't leak config into each other.
    ConfigLoader.clear();
  });

  it('should allow setting and clearing config', () => {
    const mockConfig: Config = {
      default_provider: 'test',
      providers: {
        test: { type: 'openai' },
      },
      model_mappings: {},
      storage: { retention_days: 30 },
      workflows_directory: 'workflows',
    };

    ConfigLoader.setConfig(mockConfig);
    expect(ConfigLoader.load()).toEqual(mockConfig);

    ConfigLoader.clear();
    // After clear, it will try to load from disk or use defaults
    const loaded = ConfigLoader.load();
    expect(loaded).not.toEqual(mockConfig);
  });

  it('should return correct provider for model', () => {
    const mockConfig: Config = {
      default_provider: 'openai',
      providers: {
        openai: { type: 'openai' },
        anthropic: { type: 'anthropic' },
        copilot: { type: 'copilot' },
      },
      model_mappings: {
        'gpt-*': 'copilot', // glob-style prefix mapping
        'claude-v1': 'anthropic', // exact mapping
      },
      storage: { retention_days: 30 },
      workflows_directory: 'workflows',
    };
    ConfigLoader.setConfig(mockConfig);

    // Glob match, exact match, default fallback, and provider: prefix.
    expect(ConfigLoader.getProviderForModel('gpt-4')).toBe('copilot');
    expect(ConfigLoader.getProviderForModel('claude-v1')).toBe('anthropic');
    expect(ConfigLoader.getProviderForModel('unknown')).toBe('openai');
    expect(ConfigLoader.getProviderForModel('anthropic:claude-3')).toBe('anthropic');
  });
});
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
import { existsSync, readFileSync } from 'node:fs';
|
|
2
|
+
import { join } from 'node:path';
|
|
3
|
+
import yaml from 'js-yaml';
|
|
4
|
+
import { type Config, ConfigSchema } from '../parser/config-schema';
|
|
5
|
+
|
|
6
|
+
export class ConfigLoader {
|
|
7
|
+
private static instance: Config;
|
|
8
|
+
|
|
9
|
+
static load(): Config {
|
|
10
|
+
if (ConfigLoader.instance) return ConfigLoader.instance;
|
|
11
|
+
|
|
12
|
+
const configPaths = [
|
|
13
|
+
join(process.cwd(), '.keystone', 'config.yaml'),
|
|
14
|
+
join(process.cwd(), '.keystone', 'config.yml'),
|
|
15
|
+
];
|
|
16
|
+
|
|
17
|
+
let userConfig: Record<string, unknown> = {};
|
|
18
|
+
|
|
19
|
+
for (const path of configPaths) {
|
|
20
|
+
if (existsSync(path)) {
|
|
21
|
+
try {
|
|
22
|
+
const content = readFileSync(path, 'utf8');
|
|
23
|
+
userConfig = (yaml.load(content) as Record<string, unknown>) || {};
|
|
24
|
+
break;
|
|
25
|
+
} catch (error) {
|
|
26
|
+
console.warn(`Warning: Failed to load config from ${path}:`, error);
|
|
27
|
+
}
|
|
28
|
+
}
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
const result = ConfigSchema.safeParse(userConfig);
|
|
32
|
+
if (!result.success) {
|
|
33
|
+
console.warn('Warning: Invalid configuration, using defaults:', result.error.message);
|
|
34
|
+
ConfigLoader.instance = ConfigSchema.parse({});
|
|
35
|
+
} else {
|
|
36
|
+
ConfigLoader.instance = result.data;
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
return ConfigLoader.instance;
|
|
40
|
+
}
|
|
41
|
+
|
|
42
|
+
/**
|
|
43
|
+
* For testing purposes, manually set the configuration
|
|
44
|
+
*/
|
|
45
|
+
static setConfig(config: Config): void {
|
|
46
|
+
ConfigLoader.instance = config;
|
|
47
|
+
}
|
|
48
|
+
|
|
49
|
+
/**
|
|
50
|
+
* For testing purposes, clear the cached configuration
|
|
51
|
+
*/
|
|
52
|
+
static clear(): void {
|
|
53
|
+
// @ts-ignore - allowing clearing for tests
|
|
54
|
+
ConfigLoader.instance = undefined;
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
static getProviderForModel(model: string): string {
|
|
58
|
+
const config = ConfigLoader.load();
|
|
59
|
+
|
|
60
|
+
// Check for provider prefix (e.g. "copilot:gpt-4o")
|
|
61
|
+
if (model.includes(':')) {
|
|
62
|
+
const [provider] = model.split(':');
|
|
63
|
+
if (config.providers[provider]) {
|
|
64
|
+
return provider;
|
|
65
|
+
}
|
|
66
|
+
}
|
|
67
|
+
|
|
68
|
+
// Check explicit mappings first
|
|
69
|
+
if (config.model_mappings[model]) {
|
|
70
|
+
return config.model_mappings[model];
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
// Check glob-style mappings (very basic)
|
|
74
|
+
for (const [pattern, provider] of Object.entries(config.model_mappings)) {
|
|
75
|
+
if (pattern.endsWith('*')) {
|
|
76
|
+
const prefix = pattern.slice(0, -1);
|
|
77
|
+
if (model.startsWith(prefix)) {
|
|
78
|
+
return provider;
|
|
79
|
+
}
|
|
80
|
+
}
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
return config.default_provider;
|
|
84
|
+
}
|
|
85
|
+
}
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
import { describe, expect, it, mock } from 'bun:test';
|
|
2
|
+
import type { Workflow } from '../parser/schema';
|
|
3
|
+
import { generateMermaidGraph } from './mermaid';
|
|
4
|
+
|
|
5
|
+
describe('mermaid', () => {
  it('should generate a mermaid graph from a workflow', () => {
    // Three-step chain covering shell, llm, and conditional human steps.
    const workflow: Workflow = {
      name: 'test',
      steps: [
        { id: 's1', type: 'shell', run: 'echo 1', needs: [] },
        { id: 's2', type: 'llm', agent: 'my-agent', prompt: 'hi', needs: ['s1'] },
        { id: 's3', type: 'human', message: 'ok?', needs: ['s2'], if: 'true' },
      ],
    } as unknown as Workflow;

    const graph = generateMermaidGraph(workflow);
    expect(graph).toContain('graph TD');
    // Node labels carry the step type (or agent for llm) and style classes.
    expect(graph).toContain('s1["s1\\n(shell)"]:::shell');
    expect(graph).toContain('s2["s2\\nš¤ my-agent"]:::ai');
    expect(graph).toContain('s3["s3\\n(human)\\nā Conditional"]:::human');
    // Edges point from dependency to dependent.
    expect(graph).toContain('s1 --> s2');
    expect(graph).toContain('s2 --> s3');
  });

  it('should handle loops in labeling', () => {
    const workflow: Workflow = {
      name: 'loop',
      steps: [{ id: 'l1', type: 'shell', run: 'echo', foreach: '[1,2]', needs: [] }],
    } as unknown as Workflow;
    const graph = generateMermaidGraph(workflow);
    expect(graph).toContain('(š Loop)');
  });

  it('should render mermaid as ascii', async () => {
    // Stub fetch so the test never hits the real mermaid-ascii.art service.
    const originalFetch = global.fetch;
    // @ts-ignore
    global.fetch = mock(() =>
      Promise.resolve(
        new Response('ascii graph', {
          status: 200,
        })
      )
    );

    const { renderMermaidAsAscii } = await import('./mermaid');
    const result = await renderMermaidAsAscii('graph TD\n A --> B');
    expect(result).toBe('ascii graph');

    global.fetch = originalFetch;
  });
});
|
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
import type { Workflow } from '../parser/schema';
|
|
2
|
+
|
|
3
|
+
export function generateMermaidGraph(workflow: Workflow): string {
|
|
4
|
+
const lines = ['graph TD'];
|
|
5
|
+
|
|
6
|
+
// 1. Add Nodes
|
|
7
|
+
for (const step of workflow.steps) {
|
|
8
|
+
// Sanitize ID for Mermaid
|
|
9
|
+
const safeId = step.id.replace(/[^a-zA-Z0-9_]/g, '_');
|
|
10
|
+
|
|
11
|
+
// Add type icon/label
|
|
12
|
+
let label = `${step.id}\\n(${step.type})`;
|
|
13
|
+
|
|
14
|
+
// Add specific details based on type
|
|
15
|
+
if (step.type === 'llm') label = `${step.id}\\nš¤ ${step.agent}`;
|
|
16
|
+
if (step.foreach) label += '\\n(š Loop)';
|
|
17
|
+
if (step.if) label += '\\nā Conditional';
|
|
18
|
+
|
|
19
|
+
// Styling based on type
|
|
20
|
+
let style = '';
|
|
21
|
+
switch (step.type) {
|
|
22
|
+
case 'llm':
|
|
23
|
+
style = ':::ai';
|
|
24
|
+
break;
|
|
25
|
+
case 'human':
|
|
26
|
+
style = ':::human';
|
|
27
|
+
break;
|
|
28
|
+
case 'shell':
|
|
29
|
+
style = ':::shell';
|
|
30
|
+
break;
|
|
31
|
+
default:
|
|
32
|
+
style = ':::default';
|
|
33
|
+
}
|
|
34
|
+
|
|
35
|
+
lines.push(` ${safeId}["${label}"]${style}`);
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
// 2. Add Edges (Dependencies)
|
|
39
|
+
for (const step of workflow.steps) {
|
|
40
|
+
const safeId = step.id.replace(/[^a-zA-Z0-9_]/g, '_');
|
|
41
|
+
|
|
42
|
+
if (step.needs && step.needs.length > 0) {
|
|
43
|
+
for (const need of step.needs) {
|
|
44
|
+
const safeNeed = need.replace(/[^a-zA-Z0-9_]/g, '_');
|
|
45
|
+
lines.push(` ${safeNeed} --> ${safeId}`);
|
|
46
|
+
}
|
|
47
|
+
}
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
// 3. Define Styles
|
|
51
|
+
lines.push(' classDef ai fill:#e1f5fe,stroke:#01579b,stroke-width:2px;');
|
|
52
|
+
lines.push(
|
|
53
|
+
' classDef human fill:#fff3e0,stroke:#e65100,stroke-width:2px,stroke-dasharray: 5 5;'
|
|
54
|
+
);
|
|
55
|
+
lines.push(' classDef shell fill:#f3e5f5,stroke:#4a148c,stroke-width:1px;');
|
|
56
|
+
lines.push(' classDef default fill:#fff,stroke:#333,stroke-width:1px;');
|
|
57
|
+
|
|
58
|
+
return lines.join('\n');
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
/**
|
|
62
|
+
* Renders a Mermaid graph as ASCII using mermaid-ascii.art
|
|
63
|
+
*/
|
|
64
|
+
export async function renderMermaidAsAscii(mermaid: string): Promise<string | null> {
|
|
65
|
+
try {
|
|
66
|
+
const response = await fetch('https://mermaid-ascii.art', {
|
|
67
|
+
method: 'POST',
|
|
68
|
+
headers: {
|
|
69
|
+
'Content-Type': 'application/x-www-form-urlencoded',
|
|
70
|
+
},
|
|
71
|
+
body: `mermaid=${encodeURIComponent(mermaid)}`,
|
|
72
|
+
});
|
|
73
|
+
|
|
74
|
+
if (!response.ok) {
|
|
75
|
+
return null;
|
|
76
|
+
}
|
|
77
|
+
|
|
78
|
+
const ascii = await response.text();
|
|
79
|
+
if (ascii.includes('Failed to render diagram')) {
|
|
80
|
+
return null;
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
return ascii;
|
|
84
|
+
} catch {
|
|
85
|
+
return null;
|
|
86
|
+
}
|
|
87
|
+
}
|