keystone-cli 0.2.0 → 0.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +30 -12
- package/package.json +20 -4
- package/src/cli.ts +171 -27
- package/src/expression/evaluator.test.ts +4 -0
- package/src/expression/evaluator.ts +9 -1
- package/src/parser/agent-parser.ts +11 -4
- package/src/parser/config-schema.ts +11 -0
- package/src/parser/schema.ts +20 -10
- package/src/parser/workflow-parser.ts +5 -4
- package/src/runner/llm-executor.test.ts +174 -81
- package/src/runner/llm-executor.ts +8 -3
- package/src/runner/mcp-client.test.ts +85 -47
- package/src/runner/mcp-client.ts +235 -42
- package/src/runner/mcp-manager.ts +42 -2
- package/src/runner/mcp-server.test.ts +22 -15
- package/src/runner/mcp-server.ts +21 -4
- package/src/runner/step-executor.test.ts +51 -8
- package/src/runner/step-executor.ts +69 -7
- package/src/runner/workflow-runner.ts +65 -24
- package/src/utils/auth-manager.test.ts +86 -0
- package/src/utils/auth-manager.ts +89 -0
- package/src/utils/config-loader.test.ts +30 -0
- package/src/utils/config-loader.ts +11 -1
- package/src/utils/mermaid.test.ts +18 -18
- package/src/utils/mermaid.ts +154 -20
- package/src/utils/redactor.test.ts +6 -0
- package/src/utils/redactor.ts +10 -1
- package/src/utils/sandbox.test.ts +29 -0
- package/src/utils/sandbox.ts +61 -0
package/README.md
CHANGED

@@ -80,6 +80,11 @@ Add your API keys to the generated `.env` file:
 OPENAI_API_KEY=sk-...
 ANTHROPIC_API_KEY=sk-ant-...
 ```
+Alternatively, you can use the built-in authentication management:
+```bash
+keystone auth login openai
+keystone auth login anthropic
+```

 ### 3. Run a Workflow
 ```bash

@@ -131,8 +136,8 @@ mcp_servers:
   github:
     command: npx
     args: ["-y", "@modelcontextprotocol/server-github"]
-
-
+    env:
+      GITHUB_PERSONAL_ACCESS_TOKEN: "your-github-pat" # Or omit if GITHUB_TOKEN is in your .env

 storage:

@@ -175,7 +180,7 @@ You can add any OpenAI-compatible provider (Groq, Together AI, Perplexity, Local
 Keystone supports using your GitHub Copilot subscription directly. To authenticate (using the GitHub Device Flow):

 ```bash
-keystone auth login
+keystone auth login github
 ```

 Then, you can use Copilot in your configuration:

@@ -187,10 +192,18 @@ providers:
     default_model: gpt-4o
 ```

-Authentication tokens for Copilot are managed automatically after the initial login.
+Authentication tokens for Copilot are managed automatically after the initial login.
+
+### API Key Management
+
+For other providers, you can either store API keys in a `.env` file in your project root:
 - `OPENAI_API_KEY`
 - `ANTHROPIC_API_KEY`

+Or use the `keystone auth login` command to securely store them in your local machine's configuration:
+- `keystone auth login openai`
+- `keystone auth login anthropic`
+
 ---

 ## 📝 Workflow Example

@@ -252,6 +265,7 @@ Keystone supports several specialized step types:
 - `inputType: confirm`: Simple Enter-to-continue prompt.
 - `inputType: text`: Prompt for a string input, available via `${{ steps.id.output }}`.
 - `workflow`: Trigger another workflow as a sub-step.
+- `script`: Run arbitrary JavaScript in a secure sandbox (`isolated-vm` with fallback to `node:vm`).
 - `sleep`: Pause execution for a specified duration.

 All steps support common features like `needs` (dependencies), `if` (conditionals), `retry`, `timeout`, `foreach` (parallel iteration), and `transform` (post-process output using expressions).

@@ -314,7 +328,7 @@ You are a software developer. You can use tools to explore the codebase.
 Keystone can itself act as an MCP server, allowing other agents (like Claude Desktop or GitHub Copilot) to discover and run your workflows as tools.

 ```bash
-keystone mcp
+keystone mcp start
 ```

 > **Note:** Workflow execution via the Keystone MCP server is synchronous. This provides a better experience for agents as they receive the final results directly, though it means the connection remains open for the duration of the workflow run.

@@ -332,10 +346,13 @@ mcp_servers:
     command: npx
     args: ["-y", "@modelcontextprotocol/server-filesystem", "/path/to/allowed/directory"]

-  # Remote server (
+  # Remote server (via proxy)
   atlassian:
-    type:
-
+    type: local
+    command: npx
+    args: ["-y", "mcp-remote", "https://mcp.atlassian.com/v1/sse"]
+    oauth:
+      scope: tools:read
 ```

 #### Using MCP in Steps

@@ -376,11 +393,12 @@ In these examples, the agent will have access to all tools provided by the MCP s
 | `logs <run_id>` | View logs and step status for a specific run |
 | `graph <workflow>` | Generate a Mermaid diagram of the workflow |
 | `config` | Show current configuration and providers |
-| `auth status` | Show authentication status |
-| `auth login` | Login to an authentication provider (
-| `auth logout` | Logout and clear authentication tokens |
+| `auth status [provider]` | Show authentication status |
+| `auth login [provider]` | Login to an authentication provider (github, openai, anthropic) |
+| `auth logout [provider]` | Logout and clear authentication tokens |
 | `ui` | Open the interactive TUI dashboard |
-| `mcp` | Start the Keystone MCP server |
+| `mcp start` | Start the Keystone MCP server |
+| `mcp login <server>` | Login to a remote MCP server |
 | `completion [shell]` | Generate shell completion script (zsh, bash) |
 | `prune [--days N]` | Cleanup old run data from the database |
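The `keystone auth login`, `auth logout`, and `mcp login` commands above persist credentials through the new `src/utils/auth-manager.ts` (added in this release but not included in this diff excerpt). The sketch below is inferred only from how `src/cli.ts` calls it further down; the import path and the assumption that `load()` returns a plain object with these optional fields are guesses, not documented API.

```ts
// Hedged sketch of the AuthManager surface as used by src/cli.ts in this diff;
// keystone's real auth-manager.ts may differ in details.
import { AuthManager } from 'keystone-cli/src/utils/auth-manager.ts'; // hypothetical import path

// `keystone auth login openai --token sk-...` boils down to:
AuthManager.save({ openai_api_key: 'sk-...' });

// `keystone mcp login <server>` stores per-server OAuth tokens under mcp_tokens:
const auth = AuthManager.load();
const mcp_tokens = auth.mcp_tokens || {};
mcp_tokens.atlassian = { access_token: '<pasted token>' };
AuthManager.save({ mcp_tokens });

// `keystone auth logout openai` clears the key again by saving undefined:
AuthManager.save({ openai_api_key: undefined });
```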
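The new `script` step listed above is described as running JavaScript in `isolated-vm` with a fallback to `node:vm`, and `isolated-vm` shows up as a new dependency in the package.json diff below. The actual `src/utils/sandbox.ts` (+61 lines) is not part of this excerpt, so the following is only a minimal sketch of that fallback pattern, with a made-up `runInSandbox` helper name.

```ts
// Minimal sketch of an "isolated-vm with fallback to node:vm" pattern — not keystone's
// actual sandbox.ts, which this diff does not show.
import vm from 'node:vm';

export async function runInSandbox(code: string, timeoutMs = 5_000): Promise<unknown> {
  // isolated-vm is a native addon, so load it lazily and tolerate failure.
  const mod: any = await import('isolated-vm').catch(() => undefined);
  const ivm = mod?.default ?? mod;

  if (!ivm) {
    // Fallback: node:vm offers weaker isolation but is always available.
    return vm.runInNewContext(code, {}, { timeout: timeoutMs });
  }

  const isolate = new ivm.Isolate({ memoryLimit: 64 });
  try {
    const context = await isolate.createContext();
    // Only transferable values (e.g. primitives) can cross the isolate boundary.
    return await context.eval(code, { timeout: timeoutMs });
  } finally {
    isolate.dispose();
  }
}
```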
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "keystone-cli",
-  "version": "0.2.0",
+  "version": "0.3.1",
   "description": "A local-first, declarative, agentic workflow orchestrator built on Bun",
   "type": "module",
   "bin": {

@@ -13,7 +13,13 @@
     "lint:fix": "biome check --write .",
     "format": "biome format --write ."
   },
-  "keywords": [
+  "keywords": [
+    "workflow",
+    "orchestrator",
+    "agentic",
+    "automation",
+    "bun"
+  ],
   "author": "Mark Hingston",
   "license": "MIT",
   "repository": {

@@ -21,15 +27,22 @@
     "url": "https://github.com/mhingston/keystone-cli.git"
   },
   "homepage": "https://github.com/mhingston/keystone-cli#readme",
-  "files": [
+  "files": [
+    "src",
+    "README.md",
+    "LICENSE",
+    "logo.png"
+  ],
   "dependencies": {
     "@jsep-plugin/arrow": "^1.0.6",
     "@jsep-plugin/object": "^1.2.2",
     "@types/react": "^19.2.7",
     "commander": "^12.1.0",
+    "dagre": "^0.8.5",
     "ink": "^6.5.1",
     "ink-select-input": "3.1.2",
     "ink-spinner": "^5.0.0",
+    "isolated-vm": "^6.0.2",
     "js-yaml": "^4.1.0",
     "jsep": "^1.4.0",
     "react": "^19.2.3",

@@ -37,7 +50,10 @@
   },
   "devDependencies": {
     "@biomejs/biome": "^1.9.4",
-    "@types/
+    "@types/bun": "^1.3.5",
+    "@types/dagre": "^0.7.53",
+    "@types/js-yaml": "^4.0.9",
+    "@types/node": "^25.0.3"
   },
   "engines": {
     "bun": ">=1.0.0"
package/src/cli.ts
CHANGED

@@ -1,6 +1,6 @@
 #!/usr/bin/env bun
 import { existsSync, mkdirSync, writeFileSync } from 'node:fs';
-import { join } from 'node:path';
+import { dirname, join } from 'node:path';
 import { Command } from 'commander';

 import exploreAgent from './templates/agents/explore.md' with { type: 'text' };

@@ -12,7 +12,7 @@ import scaffoldWorkflow from './templates/scaffold-feature.yaml' with { type: 't
 import { WorkflowDb } from './db/workflow-db.ts';
 import { WorkflowParser } from './parser/workflow-parser.ts';
 import { ConfigLoader } from './utils/config-loader.ts';
-import { generateMermaidGraph,
+import { generateMermaidGraph, renderWorkflowAsAscii } from './utils/mermaid.ts';
 import { WorkflowRegistry } from './utils/workflow-registry.ts';

 import pkg from '../package.json' with { type: 'json' };

@@ -204,12 +204,11 @@ program
     try {
       const resolvedPath = WorkflowRegistry.resolvePath(workflowPath);
       const workflow = WorkflowParser.loadWorkflow(resolvedPath);
-      const
-
-      const ascii = await renderMermaidAsAscii(mermaid);
+      const ascii = renderWorkflowAsAscii(workflow);
       if (ascii) {
         console.log(`\n${ascii}\n`);
       } else {
+        const mermaid = generateMermaidGraph(workflow);
         console.log('\n```mermaid');
         console.log(mermaid);
         console.log('```\n');

@@ -265,7 +264,7 @@ program

       // Import WorkflowRunner dynamically
       const { WorkflowRunner } = await import('./runner/workflow-runner.ts');
-      const runner = new WorkflowRunner(workflow, { inputs });
+      const runner = new WorkflowRunner(workflow, { inputs, workflowDir: dirname(resolvedPath) });

       const outputs = await runner.run();

@@ -273,6 +272,7 @@ program
         console.log('Outputs:');
         console.log(JSON.stringify(runner.redact(outputs), null, 2));
       }
+      process.exit(0);
     } catch (error) {
       console.error(
         '✗ Failed to execute workflow:',

@@ -339,7 +339,10 @@ program

       // Import WorkflowRunner dynamically
       const { WorkflowRunner } = await import('./runner/workflow-runner.ts');
-      const runner = new WorkflowRunner(workflow, {
+      const runner = new WorkflowRunner(workflow, {
+        resumeRunId: runId,
+        workflowDir: dirname(workflowPath),
+      });

       const outputs = await runner.run();

@@ -347,6 +350,7 @@ program
         console.log('Outputs:');
         console.log(JSON.stringify(runner.redact(outputs), null, 2));
       }
+      process.exit(0);
     } catch (error) {
       console.error('✗ Failed to resume workflow:', error instanceof Error ? error.message : error);
       process.exit(1);

@@ -480,9 +484,77 @@ program
   });

 // ===== keystone mcp =====
-program
-
-
+const mcp = program.command('mcp').description('Model Context Protocol management');
+
+mcp
+  .command('login')
+  .description('Login to an MCP server')
+  .argument('<server>', 'Server name (from config)')
+  .action(async (serverName) => {
+    const { ConfigLoader } = await import('./utils/config-loader.ts');
+    const { AuthManager } = await import('./utils/auth-manager.ts');
+
+    const config = ConfigLoader.load();
+    const server = config.mcp_servers[serverName];
+
+    if (!server || !server.oauth) {
+      console.error(`✗ MCP server '${serverName}' is not configured with OAuth.`);
+      process.exit(1);
+    }
+
+    let url = server.url;
+
+    // If it's a local server using mcp-remote, try to find the URL in args
+    if (!url && server.type === 'local' && server.args) {
+      url = server.args.find((arg) => arg.startsWith('http'));
+    }
+
+    if (!url) {
+      console.error(
+        `✗ MCP server '${serverName}' does not have a URL configured for authentication.`
+      );
+      console.log(' Please add a "url" property to your server configuration.');
+      process.exit(1);
+    }
+
+    console.log(`\n🔐 Authenticating with MCP server: ${serverName}`);
+    console.log(` URL: ${url}\n`);
+
+    // For now, we'll support a manual token entry until we have a full browser redirect flow
+    // Most MCP OAuth servers provide a way to get a token via a URL
+    const authUrl = url.replace('/sse', '/authorize') || url;
+    console.log('1. Visit the following URL to authorize:');
+    console.log(` ${authUrl}`);
+    console.log(
+      '\n Note: If you encounter errors, ensure the server is correctly configured and accessible.'
+    );
+    console.log(' You can still manually provide an OAuth token below if you have one.');
+    console.log('\n2. Paste the access token below:\n');
+
+    const prompt = 'Access Token: ';
+    process.stdout.write(prompt);
+
+    let token = '';
+    for await (const line of console) {
+      token = line.trim();
+      break;
+    }
+
+    if (token) {
+      const auth = AuthManager.load();
+      const mcp_tokens = auth.mcp_tokens || {};
+      mcp_tokens[serverName] = { access_token: token };
+      AuthManager.save({ mcp_tokens });
+      console.log(`\n✓ Successfully saved token for MCP server: ${serverName}`);
+    } else {
+      console.error('✗ No token provided.');
+      process.exit(1);
+    }
+  });
+
+mcp
+  .command('start')
+  .description('Start the Keystone MCP server (to use Keystone as a tool)')
   .action(async () => {
     const { MCPServer } = await import('./runner/mcp-server.ts');

@@ -541,28 +613,49 @@ const auth = program.command('auth').description('Authentication management');
 auth
   .command('login')
   .description('Login to an authentication provider')
-  .
+  .argument('[provider]', 'Authentication provider', 'github')
+  .option(
+    '-p, --provider <provider>',
+    'Authentication provider (deprecated, use positional argument)'
+  )
   .option('-t, --token <token>', 'Personal Access Token (if not using interactive mode)')
-  .action(async (options) => {
+  .action(async (providerArg, options) => {
    const { AuthManager } = await import('./utils/auth-manager.ts');
-    const provider = options.provider.toLowerCase();
+    const provider = (options.provider || providerArg).toLowerCase();

    if (provider === 'github') {
      let token = options.token;

      if (!token) {
-
-
-          '1. Generate a Personal Access Token (Classic) with "copilot" scope (or full repo access).'
-        );
-        console.log(' https://github.com/settings/tokens/new');
-        console.log('2. Paste the token below:\n');
+        try {
+          const deviceLogin = await AuthManager.initGitHubDeviceLogin();

-
-
-
-
-
+          console.log('\nTo login with GitHub:');
+          console.log(`1. Visit: ${deviceLogin.verification_uri}`);
+          console.log(`2. Enter code: ${deviceLogin.user_code}\n`);
+
+          console.log('Waiting for authorization...');
+          token = await AuthManager.pollGitHubDeviceLogin(deviceLogin.device_code);
+        } catch (error) {
+          console.error(
+            '\n✗ Failed to login with GitHub device flow:',
+            error instanceof Error ? error.message : error
+          );
+          console.log('\nFalling back to manual token entry...');
+
+          console.log('\nTo login with GitHub manually:');
+          console.log(
+            '1. Generate a Personal Access Token (Classic) with "copilot" scope (or full repo access).'
+          );
+          console.log(' https://github.com/settings/tokens/new');
+          console.log('2. Paste the token below:\n');
+
+          const prompt = 'Token: ';
+          process.stdout.write(prompt);
+          for await (const line of console) {
+            token = line.trim();
+            break;
+          }
        }
      }

@@ -585,6 +678,31 @@ auth
        console.error('✗ No token provided.');
        process.exit(1);
      }
+    } else if (provider === 'openai' || provider === 'anthropic') {
+      let key = options.token; // Use --token if provided as the API key
+
+      if (!key) {
+        console.log(`\n🔑 Login to ${provider.toUpperCase()}`);
+        console.log(` Please provide your ${provider.toUpperCase()} API key.\n`);
+        const prompt = 'API Key: ';
+        process.stdout.write(prompt);
+        for await (const line of console) {
+          key = line.trim();
+          break;
+        }
+      }
+
+      if (key) {
+        if (provider === 'openai') {
+          AuthManager.save({ openai_api_key: key });
+        } else {
+          AuthManager.save({ anthropic_api_key: key });
+        }
+        console.log(`\n✓ Successfully saved ${provider.toUpperCase()} API key.`);
+      } else {
+        console.error('✗ No API key provided.');
+        process.exit(1);
+      }
     } else {
       console.error(`✗ Unsupported provider: ${provider}`);
       process.exit(1);

@@ -612,13 +730,33 @@ auth
      }
    } else if (provider) {
      console.log(
-        ` ⊘ Not logged into GitHub. Run "keystone auth login
+        ` ⊘ Not logged into GitHub. Run "keystone auth login github" to authenticate.`
+      );
+    }
+  }
+
+  if (!provider || provider === 'openai') {
+    if (auth.openai_api_key) {
+      console.log(' ✓ OpenAI API key configured');
+    } else if (provider) {
+      console.log(
+        ` ⊘ OpenAI API key not configured. Run "keystone auth login openai" to authenticate.`
+      );
+    }
+  }
+
+  if (!provider || provider === 'anthropic') {
+    if (auth.anthropic_api_key) {
+      console.log(' ✓ Anthropic API key configured');
+    } else if (provider) {
+      console.log(
+        ` ⊘ Anthropic API key not configured. Run "keystone auth login anthropic" to authenticate.`
      );
    }
  }

-  if (!auth.github_token && !provider) {
-    console.log(' ⊘
+  if (!auth.github_token && !auth.openai_api_key && !auth.anthropic_api_key && !provider) {
+    console.log(' ⊘ No providers configured. Run "keystone auth login" to authenticate.');
  }
 });

@@ -641,6 +779,12 @@ auth
       copilot_expires_at: undefined,
     });
     console.log('✓ Successfully logged out of GitHub.');
+  } else if (provider === 'openai') {
+    AuthManager.save({ openai_api_key: undefined });
+    console.log('✓ Successfully cleared OpenAI API key.');
+  } else if (provider === 'anthropic') {
+    AuthManager.save({ anthropic_api_key: undefined });
+    console.log('✓ Successfully cleared Anthropic API key.');
   } else {
     console.error(`✗ Unknown provider: ${provider}`);
     process.exit(1);

package/src/expression/evaluator.test.ts
CHANGED

@@ -59,6 +59,10 @@ describe('ExpressionEvaluator', () => {
    expect(ExpressionEvaluator.evaluate('${{ false && 1 }}', context)).toBe(false);
    expect(ExpressionEvaluator.evaluate('${{ true || 1 }}', context)).toBe(true);
    expect(ExpressionEvaluator.evaluate('${{ false || 1 }}', context)).toBe(1);
+    // Explicit short-circuit tests
+    expect(ExpressionEvaluator.evaluate('${{ false && undefined_var }}', context)).toBe(false);
+    expect(ExpressionEvaluator.evaluate('${{ true || undefined_var }}', context)).toBe(true);
+    expect(ExpressionEvaluator.evaluate('${{ true && 2 }}', context)).toBe(2);
   });

   test('should support comparison operators', () => {

package/src/expression/evaluator.ts
CHANGED

@@ -83,7 +83,15 @@ export class ExpressionEvaluator {
       return '';
     }

-    if (typeof result === 'object') {
+    if (typeof result === 'object' && result !== null) {
+      // Special handling for shell command results to avoid [object Object] or JSON in commands
+      if (
+        'stdout' in result &&
+        'exitCode' in result &&
+        typeof (result as Record<string, unknown>).stdout === 'string'
+      ) {
+        return (result as Record<string, unknown>).stdout.trim();
+      }
       return JSON.stringify(result, null, 2);
     }

package/src/parser/agent-parser.ts
CHANGED

@@ -44,11 +44,18 @@ export function parseAgent(filePath: string): Agent {
   return result.data;
 }

-export function resolveAgentPath(agentName: string): string {
-  const possiblePaths = [
+export function resolveAgentPath(agentName: string, baseDir?: string): string {
+  const possiblePaths: string[] = [];
+
+  if (baseDir) {
+    possiblePaths.push(join(baseDir, 'agents', `${agentName}.md`));
+    possiblePaths.push(join(baseDir, '..', 'agents', `${agentName}.md`));
+  }
+
+  possiblePaths.push(
     join(process.cwd(), '.keystone', 'workflows', 'agents', `${agentName}.md`),
-    join(homedir(), '.keystone', 'workflows', 'agents', `${agentName}.md`)
-
+    join(homedir(), '.keystone', 'workflows', 'agents', `${agentName}.md`)
+  );

   for (const path of possiblePaths) {
     if (existsSync(path)) {

package/src/parser/config-schema.ts
CHANGED

@@ -48,11 +48,22 @@ export const ConfigSchema = z.object({
       command: z.string(),
       args: z.array(z.string()).optional(),
       env: z.record(z.string()).optional(),
+      url: z.string().url().optional(),
+      oauth: z
+        .object({
+          scope: z.string().optional(),
+        })
+        .optional(),
     }),
     z.object({
       type: z.literal('remote'),
       url: z.string().url(),
       headers: z.record(z.string()).optional(),
+      oauth: z
+        .object({
+          scope: z.string().optional(),
+        })
+        .optional(),
     }),
   ])
 )
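The optional `oauth` block added to both `mcp_servers` variants above is what the new `keystone mcp login` command checks before prompting for a token. As an illustration, here is a standalone Zod mirror of just the fields visible in this hunk (not the full `ConfigSchema`, whose remaining fields are outside this diff), showing that the README's mcp-remote/Atlassian entry now validates:

```ts
// Standalone mirror of the union members shown above, for illustration only.
import { z } from 'zod';

const OAuthSchema = z.object({ scope: z.string().optional() }).optional();

const LocalServerSchema = z.object({
  type: z.literal('local'), // simplified; the real schema's type handling sits above the hunk shown
  command: z.string(),
  args: z.array(z.string()).optional(),
  env: z.record(z.string()).optional(),
  url: z.string().url().optional(), // new: lets `keystone mcp login` find an auth endpoint
  oauth: OAuthSchema, // new
});

const RemoteServerSchema = z.object({
  type: z.literal('remote'),
  url: z.string().url(),
  headers: z.record(z.string()).optional(),
  oauth: OAuthSchema, // new
});

// The proxied Atlassian server from the README parses as a local server:
LocalServerSchema.parse({
  type: 'local',
  command: 'npx',
  args: ['-y', 'mcp-remote', 'https://mcp.atlassian.com/v1/sse'],
  oauth: { scope: 'tools:read' },
});
```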
package/src/parser/schema.ts
CHANGED

@@ -3,7 +3,7 @@ import { z } from 'zod';
 // ===== Input/Output Schema =====

 const InputSchema = z.object({
-  type: z.string
+  type: z.enum(['string', 'number', 'boolean', 'array', 'object']),
   default: z.any().optional(),
   description: z.string().optional(),
 });

@@ -105,17 +105,26 @@ const SleepStepSchema = BaseStepSchema.extend({
   duration: z.union([z.number().int().positive(), z.string()]),
 });

+const ScriptStepSchema = BaseStepSchema.extend({
+  type: z.literal('script'),
+  run: z.string(),
+});
+
 // ===== Discriminated Union for Steps =====

-
-
-
-
-
-
-
-
-
+// biome-ignore lint/suspicious/noExplicitAny: Recursive Zod type
+export const StepSchema: z.ZodType<any> = z.lazy(() =>
+  z.discriminatedUnion('type', [
+    ShellStepSchema,
+    LlmStepSchema,
+    WorkflowStepSchema,
+    FileStepSchema,
+    RequestStepSchema,
+    HumanStepSchema,
+    SleepStepSchema,
+    ScriptStepSchema,
+  ])
+);

 // ===== Workflow Schema =====

@@ -152,6 +161,7 @@ export type FileStep = z.infer<typeof FileStepSchema>;
 export type RequestStep = z.infer<typeof RequestStepSchema>;
 export type HumanStep = z.infer<typeof HumanStepSchema>;
 export type SleepStep = z.infer<typeof SleepStepSchema>;
+export type ScriptStep = z.infer<typeof ScriptStepSchema>;
 export type Workflow = z.infer<typeof WorkflowSchema>;
 export type AgentTool = z.infer<typeof AgentToolSchema>;
 export type Agent = z.infer<typeof AgentSchema>;

package/src/parser/workflow-parser.ts
CHANGED

@@ -1,5 +1,5 @@
 import { existsSync, readFileSync } from 'node:fs';
-import { join } from 'node:path';
+import { dirname, join } from 'node:path';
 import * as yaml from 'js-yaml';
 import { z } from 'zod';
 import { ExpressionEvaluator } from '../expression/evaluator.ts';

@@ -15,6 +15,7 @@ export class WorkflowParser {
     const content = readFileSync(path, 'utf-8');
     const raw = yaml.load(content);
     const workflow = WorkflowSchema.parse(raw);
+    const workflowDir = dirname(path);

     // Resolve implicit dependencies from expressions
     WorkflowParser.resolveImplicitDependencies(workflow);

@@ -23,7 +24,7 @@ export class WorkflowParser {
     WorkflowParser.validateDAG(workflow);

     // Validate agents exist
-    WorkflowParser.validateAgents(workflow);
+    WorkflowParser.validateAgents(workflow, workflowDir);

     // Validate finally block
     WorkflowParser.validateFinally(workflow);

@@ -121,12 +122,12 @@ export class WorkflowParser {
   /**
    * Validate that all agents referenced in LLM steps exist
    */
-  private static validateAgents(workflow: Workflow): void {
+  private static validateAgents(workflow: Workflow, baseDir?: string): void {
     const allSteps = [...workflow.steps, ...(workflow.finally || [])];
     for (const step of allSteps) {
       if (step.type === 'llm') {
         try {
-          resolveAgentPath(step.agent);
+          resolveAgentPath(step.agent, baseDir);
         } catch (error) {
           throw new Error(`Agent "${step.agent}" referenced in step "${step.id}" not found.`);
         }