@phenixstar/talon 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (112):
  1. package/.env.example +72 -0
  2. package/Dockerfile +161 -0
  3. package/Dockerfile.router +16 -0
  4. package/LICENSE +661 -0
  5. package/README.md +709 -0
  6. package/bin/talon.js +96 -0
  7. package/bin/talon.mjs +96 -0
  8. package/configs/config-schema.json +160 -0
  9. package/configs/example-config.yaml +50 -0
  10. package/configs/mcp-allowlist.json +47 -0
  11. package/configs/model-routing.yaml +39 -0
  12. package/configs/router-config.json +73 -0
  13. package/configs/talon-seccomp.json +89 -0
  14. package/dist/cli/dependency-checker.d.ts +25 -0
  15. package/dist/cli/dependency-checker.d.ts.map +1 -0
  16. package/dist/cli/dependency-checker.js +165 -0
  17. package/dist/cli/dependency-checker.js.map +1 -0
  18. package/dist/cli/doctor.d.ts +2 -0
  19. package/dist/cli/doctor.d.ts.map +1 -0
  20. package/dist/cli/doctor.js +127 -0
  21. package/dist/cli/doctor.js.map +1 -0
  22. package/dist/cli/env-configurator.d.ts +27 -0
  23. package/dist/cli/env-configurator.d.ts.map +1 -0
  24. package/dist/cli/env-configurator.js +115 -0
  25. package/dist/cli/env-configurator.js.map +1 -0
  26. package/dist/cli/setup-renderer.d.ts +23 -0
  27. package/dist/cli/setup-renderer.d.ts.map +1 -0
  28. package/dist/cli/setup-renderer.js +71 -0
  29. package/dist/cli/setup-renderer.js.map +1 -0
  30. package/dist/cli/setup.d.ts +2 -0
  31. package/dist/cli/setup.d.ts.map +1 -0
  32. package/dist/cli/setup.js +302 -0
  33. package/dist/cli/setup.js.map +1 -0
  34. package/dist/types/activity-logger.d.ts +10 -0
  35. package/dist/types/activity-logger.d.ts.map +1 -0
  36. package/dist/types/activity-logger.js +7 -0
  37. package/dist/types/activity-logger.js.map +1 -0
  38. package/dist/types/agents.d.ts +39 -0
  39. package/dist/types/agents.d.ts.map +1 -0
  40. package/dist/types/agents.js +28 -0
  41. package/dist/types/agents.js.map +1 -0
  42. package/dist/types/audit.d.ts +28 -0
  43. package/dist/types/audit.d.ts.map +1 -0
  44. package/dist/types/audit.js +7 -0
  45. package/dist/types/audit.js.map +1 -0
  46. package/dist/types/backtesting.d.ts +45 -0
  47. package/dist/types/backtesting.d.ts.map +1 -0
  48. package/dist/types/backtesting.js +3 -0
  49. package/dist/types/backtesting.js.map +1 -0
  50. package/dist/types/config.d.ts +48 -0
  51. package/dist/types/config.d.ts.map +1 -0
  52. package/dist/types/config.js +7 -0
  53. package/dist/types/config.js.map +1 -0
  54. package/dist/types/errors.d.ts +55 -0
  55. package/dist/types/errors.d.ts.map +1 -0
  56. package/dist/types/errors.js +41 -0
  57. package/dist/types/errors.js.map +1 -0
  58. package/dist/types/evolution.d.ts +36 -0
  59. package/dist/types/evolution.d.ts.map +1 -0
  60. package/dist/types/evolution.js +14 -0
  61. package/dist/types/evolution.js.map +1 -0
  62. package/dist/types/index.d.ts +11 -0
  63. package/dist/types/index.d.ts.map +1 -0
  64. package/dist/types/index.js +16 -0
  65. package/dist/types/index.js.map +1 -0
  66. package/dist/types/metrics.d.ts +13 -0
  67. package/dist/types/metrics.d.ts.map +1 -0
  68. package/dist/types/metrics.js +7 -0
  69. package/dist/types/metrics.js.map +1 -0
  70. package/dist/types/resilience.d.ts +30 -0
  71. package/dist/types/resilience.d.ts.map +1 -0
  72. package/dist/types/resilience.js +7 -0
  73. package/dist/types/resilience.js.map +1 -0
  74. package/dist/types/result.d.ts +42 -0
  75. package/dist/types/result.d.ts.map +1 -0
  76. package/dist/types/result.js +30 -0
  77. package/dist/types/result.js.map +1 -0
  78. package/docker-compose.yml +91 -0
  79. package/package.json +75 -0
  80. package/prompts/exploit-auth.txt +423 -0
  81. package/prompts/exploit-authz.txt +425 -0
  82. package/prompts/exploit-injection.txt +452 -0
  83. package/prompts/exploit-ssrf.txt +502 -0
  84. package/prompts/exploit-xss.txt +442 -0
  85. package/prompts/pipeline-testing/exploit-auth.txt +31 -0
  86. package/prompts/pipeline-testing/exploit-authz.txt +31 -0
  87. package/prompts/pipeline-testing/exploit-injection.txt +31 -0
  88. package/prompts/pipeline-testing/exploit-ssrf.txt +31 -0
  89. package/prompts/pipeline-testing/exploit-xss.txt +31 -0
  90. package/prompts/pipeline-testing/pre-recon-code.txt +1 -0
  91. package/prompts/pipeline-testing/recon.txt +1 -0
  92. package/prompts/pipeline-testing/report-executive.txt +1 -0
  93. package/prompts/pipeline-testing/vuln-auth.txt +13 -0
  94. package/prompts/pipeline-testing/vuln-authz.txt +13 -0
  95. package/prompts/pipeline-testing/vuln-injection.txt +13 -0
  96. package/prompts/pipeline-testing/vuln-ssrf.txt +13 -0
  97. package/prompts/pipeline-testing/vuln-xss.txt +13 -0
  98. package/prompts/pre-recon-code.txt +403 -0
  99. package/prompts/recon.txt +382 -0
  100. package/prompts/report-executive.txt +126 -0
  101. package/prompts/shared/_exploit-scope.txt +14 -0
  102. package/prompts/shared/_rules.txt +2 -0
  103. package/prompts/shared/_target.txt +1 -0
  104. package/prompts/shared/_vuln-scope.txt +1 -0
  105. package/prompts/shared/login-instructions.txt +82 -0
  106. package/prompts/vuln-auth.txt +268 -0
  107. package/prompts/vuln-authz.txt +373 -0
  108. package/prompts/vuln-injection.txt +380 -0
  109. package/prompts/vuln-ssrf.txt +315 -0
  110. package/prompts/vuln-xss.txt +304 -0
  111. package/talon +459 -0
  112. package/talon.ps1 +348 -0
package/talon ADDED
@@ -0,0 +1,459 @@
#!/bin/bash
# Talon CLI - AI Penetration Testing Framework
#
# Entry-point wrapper around docker compose: detects the container runtime,
# loads .env, then dispatches to the cmd_* functions defined below.

set -e

# Prevent MSYS from converting Unix paths (e.g. /repos/my-repo) to Windows paths
case "$OSTYPE" in
  msys*) export MSYS_NO_PATHCONV=1 ;;
esac

# Detect Podman vs Docker and set compose files accordingly
# Podman doesn't support host-gateway, so we only include the Docker override for actual Docker
COMPOSE_BASE="docker-compose.yml"
if command -v podman &>/dev/null; then
  # Podman detected (either native or via Docker Desktop shim) - use base config only
  # NOTE(review): this triggers whenever a podman binary exists, even if the
  # user actually runs Docker — confirm that heuristic is intended.
  COMPOSE_OVERRIDE=""
else
  # Docker detected - include extra_hosts override for Linux localhost access
  # COMPOSE_OVERRIDE is deliberately a flat string ("-f <file>" or empty) and
  # is expanded UNQUOTED at every use site so it word-splits into arguments.
  COMPOSE_OVERRIDE="-f docker-compose.docker.yml"
fi
COMPOSE_FILE="$COMPOSE_BASE"

# Load .env if present
# set -a auto-exports every variable the sourced file assigns, so the values
# are visible to docker compose and to child processes.
if [ -f .env ]; then
  set -a
  source .env
  set +a
fi
# Print the usage banner to stdout.
# The heredoc delimiter is quoted ('EOF') so the banner text — including the
# $ -free ASCII art and example URLs — is emitted literally, with no
# parameter or command expansion.
show_help() {
  cat << 'EOF'

  ████████╗ █████╗ ██╗      ██████╗ ███╗   ██╗
  ╚══██╔══╝██╔══██╗██║     ██╔═══██╗████╗  ██║
     ██║   ███████║██║     ██║   ██║██╔██╗ ██║
     ██║   ██╔══██║██║     ██║   ██║██║╚██╗██║
     ██║   ██║  ██║███████╗╚██████╔╝██║ ╚████║
     ╚═╝   ╚═╝  ╚═╝╚══════╝ ╚═════╝ ╚═╝  ╚═══╝

  AI Penetration Testing Framework

Usage:
  ./talon setup                           Interactive setup wizard
  ./talon doctor                          Validate configuration and dependencies
  ./talon start URL=<url> REPO=<name>     Start a pentest workflow
  ./talon benchmark TARGET=<name>         Run benchmark and compute F1 metrics
  ./talon evolve GENERATIONS=<n>          Run N evolution generations on gene pool
  ./talon workspaces                      List all workspaces
  ./talon logs ID=<workflow-id>           Tail logs for a specific workflow
  ./talon stop                            Stop all containers
  ./talon help                            Show this help message

Options for 'start':
  REPO=<name>             Folder name under ./repos/ (e.g. REPO=repo-name)
  CONFIG=<path>           Configuration file (YAML)
  OUTPUT=<path>           Output directory for reports (default: ./audit-logs/)
  WORKSPACE=<name>        Named workspace (auto-resumes if exists, creates if new)
  PIPELINE_TESTING=true   Use minimal prompts for fast testing
  ROUTER=true             Route requests through claude-code-router (multi-model support)

Options for 'stop':
  CLEAN=true              Remove all data including volumes

Examples:
  ./talon start URL=https://example.com REPO=repo-name
  ./talon start URL=https://example.com REPO=repo-name WORKSPACE=q1-audit
  ./talon start URL=https://example.com REPO=repo-name CONFIG=./config.yaml
  ./talon start URL=https://example.com REPO=repo-name OUTPUT=./my-reports
  ./talon workspaces
  ./talon logs ID=example.com_talon-1234567890
  ./talon stop CLEAN=true

Monitor workflows at http://localhost:8233
EOF
}
76
+
# Parse KEY=value command-line tokens into the corresponding global variables.
# Only the known keys below are honored; anything else is silently ignored,
# which lets every subcommand share the same parser.
parse_args() {
  local kv
  for kv in "$@"; do
    case "$kv" in
      URL=*)              URL=${kv#*=} ;;
      REPO=*)             REPO=${kv#*=} ;;
      CONFIG=*)           CONFIG=${kv#*=} ;;
      OUTPUT=*)           OUTPUT=${kv#*=} ;;
      ID=*)               ID=${kv#*=} ;;
      CLEAN=*)            CLEAN=${kv#*=} ;;
      PIPELINE_TESTING=*) PIPELINE_TESTING=${kv#*=} ;;
      REBUILD=*)          REBUILD=${kv#*=} ;;
      ROUTER=*)           ROUTER=${kv#*=} ;;
      WORKSPACE=*)        WORKSPACE=${kv#*=} ;;
      TARGET=*)           TARGET=${kv#*=} ;;
      GENERATIONS=*)      GENERATIONS=${kv#*=} ;;
      GROUND_TRUTH=*)     GROUND_TRUTH=${kv#*=} ;;
      SEED=*)             SEED=${kv#*=} ;;
    esac
  done
}
98
+
# Check if Temporal is running and healthy
# Runs the Temporal health RPC inside the temporal container; succeeds (status
# 0) only when the output contains "SERVING". All docker errors are silenced,
# so "container not up" and "unhealthy" both just return non-zero.
# $COMPOSE_OVERRIDE is intentionally unquoted: it holds zero or more words
# ("-f <file>") that must word-split into separate docker arguments.
is_temporal_ready() {
  docker compose -f "$COMPOSE_FILE" $COMPOSE_OVERRIDE exec -T temporal \
    temporal operator cluster health --address localhost:7233 2>/dev/null | grep -q "SERVING"
}
104
+
# Ensure containers are running with correct mounts
# Fast path: Temporal already healthy -> only bounce the worker so it picks up
# env changes. Slow path: (re)build and start everything, then poll Temporal
# for up to ~60s (30 attempts x 2s) before giving up.
# Reads: REBUILD, COMPOSE_FILE, COMPOSE_OVERRIDE (unquoted on purpose).
ensure_containers() {
  # Quick check: if Temporal is already healthy, just refresh worker env
  if is_temporal_ready; then
    # Always refresh worker to pick up env var changes (ROUTER mode, model overrides, etc.)
    # || true: a failed refresh is deliberately non-fatal under set -e.
    docker compose -f "$COMPOSE_FILE" $COMPOSE_OVERRIDE up -d worker 2>/dev/null || true
    return 0
  fi

  # Need to start containers
  echo "Starting Talon containers..."
  if [ "$REBUILD" = "true" ]; then
    # Force rebuild without cache (use when code changes aren't being picked up)
    echo "Rebuilding with --no-cache..."
    docker compose -f "$COMPOSE_FILE" $COMPOSE_OVERRIDE build --no-cache worker
  fi
  docker compose -f "$COMPOSE_FILE" $COMPOSE_OVERRIDE up -d --build

  # Wait for Temporal to be ready
  echo "Waiting for Temporal to be ready..."
  for i in $(seq 1 30); do
    if is_temporal_ready; then
      echo "Temporal is ready!"
      return 0
    fi
    # Last attempt exhausted: abort the whole script, not just this function.
    if [ "$i" -eq 30 ]; then
      echo "Timeout waiting for Temporal"
      exit 1
    fi
    sleep 2
  done
}
137
+
# Start a pentest workflow: validate credentials, resolve the repo path,
# prepare writable mounts, optionally boot the router, then submit the
# workflow via the worker container.
# Reads globals set by parse_args (URL, REPO, CONFIG, OUTPUT, ROUTER,
# PIPELINE_TESTING, WORKSPACE) plus provider env vars from .env.
# Exits non-zero on any validation failure.
cmd_start() {
  parse_args "$@"

  # Validate required vars
  if [ -z "$URL" ] || [ -z "$REPO" ]; then
    echo "ERROR: URL and REPO are required"
    echo "Usage: ./talon start URL=<url> REPO=<name>"
    exit 1
  fi

  # Check for API key (Bedrock and router modes can bypass this)
  if [ -z "$ANTHROPIC_API_KEY" ] && [ -z "$CLAUDE_CODE_OAUTH_TOKEN" ]; then
    if [ "$CLAUDE_CODE_USE_BEDROCK" = "1" ]; then
      # Bedrock mode — validate required AWS credentials
      MISSING=""
      [ -z "$AWS_REGION" ] && MISSING="$MISSING AWS_REGION"
      [ -z "$AWS_BEARER_TOKEN_BEDROCK" ] && MISSING="$MISSING AWS_BEARER_TOKEN_BEDROCK"
      [ -z "$ANTHROPIC_SMALL_MODEL" ] && MISSING="$MISSING ANTHROPIC_SMALL_MODEL"
      [ -z "$ANTHROPIC_MEDIUM_MODEL" ] && MISSING="$MISSING ANTHROPIC_MEDIUM_MODEL"
      [ -z "$ANTHROPIC_LARGE_MODEL" ] && MISSING="$MISSING ANTHROPIC_LARGE_MODEL"
      if [ -n "$MISSING" ]; then
        echo "ERROR: Bedrock mode requires the following env vars in .env:$MISSING"
        exit 1
      fi
    elif [ "$CLAUDE_CODE_USE_VERTEX" = "1" ]; then
      # Vertex AI mode — validate required GCP credentials
      MISSING=""
      [ -z "$CLOUD_ML_REGION" ] && MISSING="$MISSING CLOUD_ML_REGION"
      [ -z "$ANTHROPIC_VERTEX_PROJECT_ID" ] && MISSING="$MISSING ANTHROPIC_VERTEX_PROJECT_ID"
      [ -z "$ANTHROPIC_SMALL_MODEL" ] && MISSING="$MISSING ANTHROPIC_SMALL_MODEL"
      [ -z "$ANTHROPIC_MEDIUM_MODEL" ] && MISSING="$MISSING ANTHROPIC_MEDIUM_MODEL"
      [ -z "$ANTHROPIC_LARGE_MODEL" ] && MISSING="$MISSING ANTHROPIC_LARGE_MODEL"
      if [ -n "$MISSING" ]; then
        echo "ERROR: Vertex AI mode requires the following env vars in .env:$MISSING"
        exit 1
      fi
      # Validate service account key file (must be inside ./credentials/ for Docker mount)
      if [ -z "$GOOGLE_APPLICATION_CREDENTIALS" ]; then
        echo "ERROR: Vertex AI mode requires GOOGLE_APPLICATION_CREDENTIALS in .env"
        echo "       Place your service account key in ./credentials/ and set:"
        echo "       GOOGLE_APPLICATION_CREDENTIALS=./credentials/gcp-sa-key.json"
        exit 1
      fi
      if [ ! -f "$GOOGLE_APPLICATION_CREDENTIALS" ]; then
        echo "ERROR: Service account key file not found: $GOOGLE_APPLICATION_CREDENTIALS"
        echo "       Download a key from the GCP Console (IAM > Service Accounts > Keys)"
        exit 1
      fi
    elif [ "$ROUTER" = "true" ] && { [ -n "$OPENAI_API_KEY" ] || [ -n "$OPENROUTER_API_KEY" ]; }; then
      # Router mode with alternative provider - set a placeholder for SDK init
      export ANTHROPIC_API_KEY="router-mode"
    else
      echo "ERROR: Set ANTHROPIC_API_KEY or CLAUDE_CODE_OAUTH_TOKEN in .env"
      echo "       (or use CLAUDE_CODE_USE_BEDROCK=1 for AWS Bedrock,"
      echo "       CLAUDE_CODE_USE_VERTEX=1 for Google Vertex AI,"
      echo "       or ROUTER=true with OPENAI_API_KEY or OPENROUTER_API_KEY)"
      exit 1
    fi
  fi

  # Determine container path for REPO
  # - If REPO is already a container path (/benchmarks/*, /repos/*), use as-is
  # - Otherwise, treat as a folder name under ./repos/ (mounted at /repos in container)
  case "$REPO" in
    /benchmarks/*|/repos/*)
      CONTAINER_REPO="$REPO"
      ;;
    *)
      if [ ! -d "./repos/$REPO" ]; then
        echo "ERROR: Repository not found at ./repos/$REPO"
        echo ""
        echo "Place your target repository under the ./repos/ directory"
        exit 1
      fi
      CONTAINER_REPO="/repos/$REPO"
      ;;
  esac

  # Handle custom OUTPUT directory
  # Export OUTPUT_DIR for docker-compose volume mount BEFORE starting containers
  if [ -n "$OUTPUT" ]; then
    # Create output directory with write permissions for container user (UID 1001)
    mkdir -p "$OUTPUT"
    chmod 777 "$OUTPUT"  # 777 required: container UID 1001 differs from host UID
    export OUTPUT_DIR="$OUTPUT"
  fi

  # Handle ROUTER flag - set env vars BEFORE starting containers
  if [ "$ROUTER" = "true" ]; then
    # Set ANTHROPIC_BASE_URL to route through router
    export ANTHROPIC_BASE_URL="http://router:3456"
    # Generate router auth token if not set (must match router-config.json APIKEY)
    if [ -z "$TALON_ROUTER_KEY" ]; then
      # FIX: assignment split from export (SC2155) — if openssl is missing or
      # fails, set -e now aborts instead of silently exporting an empty key.
      TALON_ROUTER_KEY=$(openssl rand -hex 32)
      export TALON_ROUTER_KEY
      echo "Generated router key (set TALON_ROUTER_KEY in .env to persist)"
    fi
    export ANTHROPIC_AUTH_TOKEN="$TALON_ROUTER_KEY"
  fi

  # Ensure audit-logs directory exists with write permissions for container user (UID 1001)
  mkdir -p ./audit-logs ./credentials
  chmod 777 ./audit-logs  # 777 required: container UID 1001 differs from host UID

  # Ensure repo deliverables directory is writable by container user (UID 1001)
  if [ -d "./repos/$REPO" ]; then
    mkdir -p "./repos/$REPO/deliverables"
    chmod 777 "./repos/$REPO/deliverables"  # 777 required: container UID 1001 differs from host UID
  fi

  # Start router BEFORE main containers so worker can reach it on boot
  if [ "$ROUTER" = "true" ]; then
    if [ -z "$OPENAI_API_KEY" ] && [ -z "$OPENROUTER_API_KEY" ]; then
      echo "WARNING: No provider API key set (OPENAI_API_KEY or OPENROUTER_API_KEY). Router may not work."
    fi

    echo "Starting claude-code-router..."
    docker compose -f "$COMPOSE_FILE" $COMPOSE_OVERRIDE --profile router up -d --build router

    # Wait for router health check (~30s max: 15 attempts x 2s)
    echo "Waiting for router to be ready..."
    for i in $(seq 1 15); do
      if docker compose -f "$COMPOSE_FILE" $COMPOSE_OVERRIDE --profile router ps router 2>/dev/null | grep -q "healthy"; then
        echo "Router is ready!"
        break
      fi
      if [ "$i" -eq 15 ]; then
        echo "WARNING: Router health check timed out (continuing anyway)"
        break  # FIX: stop polling — no point sleeping after the final attempt
      fi
      sleep 2
    done
  fi

  # Ensure containers are running (starts them if needed)
  ensure_containers

  # Build optional args as an ARRAY so values containing spaces (CONFIG,
  # OUTPUT, WORKSPACE paths/names) reach the client as single arguments.
  # FIX: the old flat-string ARGS relied on word-splitting and broke on spaces.
  ARGS=()
  [ -n "$CONFIG" ] && ARGS+=(--config "$CONFIG")

  # Pass container path for output (where OUTPUT_DIR is mounted)
  # Also pass display path so client can show the host path to user
  if [ -n "$OUTPUT" ]; then
    ARGS+=(--output /app/output --display-output "$OUTPUT")
  fi

  [ "$PIPELINE_TESTING" = "true" ] && ARGS+=(--pipeline-testing)
  [ -n "$WORKSPACE" ] && ARGS+=(--workspace "$WORKSPACE")

  # Run the client to submit workflow
  docker compose -f "$COMPOSE_FILE" $COMPOSE_OVERRIDE exec -T worker \
    node dist/temporal/client.js "$URL" "$CONTAINER_REPO" "${ARGS[@]}"
}
290
+
# Tail the workflow.log for a given workflow ID.
# Resolution order: exact ./audit-logs/<ID>/ path, then two workspace-derived
# fallbacks, then a filesystem search. Blocks on `tail -f` once found.
cmd_logs() {
  parse_args "$@"

  if [ -z "$ID" ]; then
    echo "ERROR: ID is required"
    echo "Usage: ./talon logs ID=<workflow-id>"
    exit 1
  fi

  # Auto-discover the workflow log file
  # 1. Check default location first
  # 2. Search common output directories
  # 3. Fall back to find command
  WORKFLOW_LOG=""

  if [ -f "./audit-logs/${ID}/workflow.log" ]; then
    WORKFLOW_LOG="./audit-logs/${ID}/workflow.log"
  else
    # For resume workflow IDs (e.g. workspace_resume_123), check the original workspace
    # %%_resume_* strips the longest "_resume_*" suffix; if nothing was
    # stripped the ID had no resume suffix and this fallback is skipped.
    WORKSPACE_ID="${ID%%_resume_*}"
    if [ "$WORKSPACE_ID" != "$ID" ] && [ -f "./audit-logs/${WORKSPACE_ID}/workflow.log" ]; then
      WORKFLOW_LOG="./audit-logs/${WORKSPACE_ID}/workflow.log"
    fi

    # For named workspace IDs (e.g. workspace_talon-123), check the workspace name
    if [ -z "$WORKFLOW_LOG" ]; then
      WORKSPACE_ID="${ID%%_talon-*}"
      if [ "$WORKSPACE_ID" != "$ID" ] && [ -f "./audit-logs/${WORKSPACE_ID}/workflow.log" ]; then
        WORKFLOW_LOG="./audit-logs/${WORKSPACE_ID}/workflow.log"
      fi
    fi

    if [ -z "$WORKFLOW_LOG" ]; then
      # Search for the workflow directory (handles custom OUTPUT paths)
      # find errors are silenced; head -1 keeps only the first match.
      FOUND=$(find . -maxdepth 3 -path "*/${ID}/workflow.log" -type f 2>/dev/null | head -1)
      if [ -n "$FOUND" ]; then
        WORKFLOW_LOG="$FOUND"
      fi
    fi
  fi

  if [ -n "$WORKFLOW_LOG" ]; then
    echo "Tailing workflow log: $WORKFLOW_LOG"
    # Blocks until interrupted (Ctrl-C).
    tail -f "$WORKFLOW_LOG"
  else
    echo "ERROR: Workflow log not found for ID: $ID"
    echo ""
    echo "Possible causes:"
    echo "  - Workflow hasn't started yet"
    echo "  - Workflow ID is incorrect"
    echo ""
    echo "Check the Temporal Web UI at http://localhost:8233 for workflow details"
    exit 1
  fi
}
346
+
# List all workspaces by running the workspaces script inside the worker
# container. Takes no arguments.
cmd_workspaces() {
  # Ensure containers are running (need worker to execute node)
  ensure_containers

  docker compose -f "$COMPOSE_FILE" $COMPOSE_OVERRIDE exec -T worker \
    node dist/temporal/workspaces.js
}
354
+
# Run a benchmark against TARGET inside the worker container and compute
# F1 metrics. Optional GROUND_TRUTH is forwarded as --ground-truth.
# Exits 1 when TARGET is missing.
cmd_benchmark() {
  parse_args "$@"

  if [ -z "$TARGET" ]; then
    echo "ERROR: TARGET is required"
    echo "Usage: ./talon benchmark TARGET=<name> [GROUND_TRUTH=<csv>]"
    exit 1
  fi

  ensure_containers

  # FIX: build optional args as an array instead of a word-split string so a
  # GROUND_TRUTH path containing spaces reaches the CLI as one argument.
  ARGS=()
  [ -n "$GROUND_TRUTH" ] && ARGS+=(--ground-truth "$GROUND_TRUTH")

  docker compose -f "$COMPOSE_FILE" $COMPOSE_OVERRIDE exec -T worker \
    node dist/backtesting/benchmark-cli.js "$TARGET" --output /app/audit-logs "${ARGS[@]}"
}
372
+
# Run GENERATIONS evolution generations (default 1) on the gene pool via the
# worker container. Optional SEED is forwarded as --seed.
cmd_evolve() {
  parse_args "$@"

  # Default to a single generation when not supplied.
  GENERATIONS="${GENERATIONS:-1}"

  ensure_containers

  # FIX: array instead of word-split string so SEED values with spaces (or
  # other shell metacharacters) survive intact.
  ARGS=(--generations "$GENERATIONS")
  [ -n "$SEED" ] && ARGS+=(--seed "$SEED")

  docker compose -f "$COMPOSE_FILE" $COMPOSE_OVERRIDE exec -T worker \
    node dist/evolution/evolve-cli.js "${ARGS[@]}"
}
386
+
# Interactive setup wizard. Runs on the host, so it needs a local Node.js;
# builds the project on first use, then delegates to the setup script.
cmd_setup() {
  # Guard clause: bail out early when Node.js is absent.
  if ! command -v node >/dev/null 2>&1; then
    echo "Node.js is required for the setup wizard."
    echo "Install Node.js 22+: https://nodejs.org/en/download"
    exit 1
  fi
  # Lazily build once: only when the compiled entry point is missing.
  [ -f "dist/cli/setup.js" ] || {
    echo "Building Talon..."
    npm install --silent && npm run build
  }
  node dist/cli/setup.js "$@"
}
399
+
# Configuration/dependency checker. Same host-side Node.js requirement and
# lazy-build behavior as cmd_setup, delegating to the doctor script.
cmd_doctor() {
  # Guard clause: bail out early when Node.js is absent.
  if ! command -v node >/dev/null 2>&1; then
    echo "Node.js is required. Install: https://nodejs.org/en/download"
    exit 1
  fi
  # Lazily build once: only when the compiled entry point is missing.
  [ -f "dist/cli/doctor.js" ] || {
    echo "Building Talon..."
    npm install --silent && npm run build
  }
  node dist/cli/doctor.js "$@"
}
411
+
# Stop all Talon containers (router profile included).
# CLEAN=true additionally removes volumes (-v), wiping persisted data.
cmd_stop() {
  parse_args "$@"

  # Assemble the `down` invocation once; append -v only for a clean stop.
  local down_args=(--profile router down)
  [ "$CLEAN" = "true" ] && down_args+=(-v)

  docker compose -f "$COMPOSE_FILE" $COMPOSE_OVERRIDE "${down_args[@]}"
}
421
+
# Main command dispatch.
# The first positional argument selects the subcommand; the remainder is
# forwarded untouched. No argument — or any unrecognized word (including
# help/--help/-h) — prints the help banner.
subcommand="${1:-help}"
[ "$#" -gt 0 ] && shift

case "$subcommand" in
  setup)      cmd_setup "$@" ;;
  doctor)     cmd_doctor "$@" ;;
  start)      cmd_start "$@" ;;
  logs)       cmd_logs "$@" ;;
  benchmark)  cmd_benchmark "$@" ;;
  evolve)     cmd_evolve "$@" ;;
  workspaces) cmd_workspaces ;;
  stop)       cmd_stop "$@" ;;
  *)          show_help ;;
esac