loki-mode 6.17.2 → 6.19.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/SKILL.md +2 -2
- package/VERSION +1 -1
- package/autonomy/loki +728 -0
- package/autonomy/run.sh +285 -0
- package/dashboard/__init__.py +1 -1
- package/docs/INSTALLATION.md +1 -1
- package/mcp/__init__.py +1 -1
- package/package.json +1 -1
package/SKILL.md
CHANGED
|
@@ -3,7 +3,7 @@ name: loki-mode
|
|
|
3
3
|
description: Multi-agent autonomous startup system. Triggers on "Loki Mode". Takes PRD to deployed product with minimal human intervention. Requires --dangerously-skip-permissions flag.
|
|
4
4
|
---
|
|
5
5
|
|
|
6
|
-
# Loki Mode v6.
|
|
6
|
+
# Loki Mode v6.19.0
|
|
7
7
|
|
|
8
8
|
**You are an autonomous agent. You make decisions. You do not ask questions. You do not stop.**
|
|
9
9
|
|
|
@@ -267,4 +267,4 @@ The following features are documented in skill modules but not yet fully automat
|
|
|
267
267
|
| Quality gates 3-reviewer system | Implemented (v5.35.0) | 5 specialist reviewers in `skills/quality-gates.md`; execution in run.sh |
|
|
268
268
|
| Benchmarks (HumanEval, SWE-bench) | Infrastructure only | Runner scripts and datasets exist in `benchmarks/`; no published results |
|
|
269
269
|
|
|
270
|
-
**v6.
|
|
270
|
+
**v6.19.0 | [Autonomi](https://www.autonomi.dev/) flagship product | ~260 lines core**
|
package/VERSION
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
6.
|
|
1
|
+
6.19.0
|
package/autonomy/loki
CHANGED
|
@@ -435,6 +435,8 @@ show_help() {
|
|
|
435
435
|
echo " agent [cmd] Agent type dispatch (list|info|run|start|review)"
|
|
436
436
|
echo " remote [PRD] Start remote session (connect from phone/browser, Claude Pro/Max)"
|
|
437
437
|
echo " trigger Event-driven autonomous execution (schedules, webhooks)"
|
|
438
|
+
echo " failover [cmd] Cross-provider auto-failover (status|--enable|--test|--chain)"
|
|
439
|
+
echo " plan <PRD> Dry-run PRD analysis: complexity, cost, and execution plan"
|
|
438
440
|
echo " version Show version"
|
|
439
441
|
echo " help Show this help"
|
|
440
442
|
echo ""
|
|
@@ -8298,6 +8300,726 @@ for line in sys.stdin:
|
|
|
8298
8300
|
esac
|
|
8299
8301
|
}
|
|
8300
8302
|
|
|
8303
|
+
# Cross-provider auto-failover management (v6.19.0)
|
|
8304
|
+
cmd_failover() {
|
|
8305
|
+
local loki_dir="${TARGET_DIR:-.}/.loki"
|
|
8306
|
+
local failover_file="$loki_dir/state/failover.json"
|
|
8307
|
+
local script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
8308
|
+
|
|
8309
|
+
if [ $# -eq 0 ]; then
|
|
8310
|
+
# No args: show current failover config and health
|
|
8311
|
+
if [ ! -f "$failover_file" ]; then
|
|
8312
|
+
echo -e "${BOLD}Cross-Provider Auto-Failover${NC}"
|
|
8313
|
+
echo ""
|
|
8314
|
+
echo "Status: DISABLED"
|
|
8315
|
+
echo ""
|
|
8316
|
+
echo "Enable with: loki failover --enable"
|
|
8317
|
+
echo "Or set: LOKI_FAILOVER=true loki start"
|
|
8318
|
+
return 0
|
|
8319
|
+
fi
|
|
8320
|
+
|
|
8321
|
+
echo -e "${BOLD}Cross-Provider Auto-Failover${NC}"
|
|
8322
|
+
echo ""
|
|
8323
|
+
python3 << 'PYEOF'
|
|
8324
|
+
import json, os
|
|
8325
|
+
fpath = os.path.join(os.environ.get('TARGET_DIR', '.'), '.loki/state/failover.json')
|
|
8326
|
+
try:
|
|
8327
|
+
with open(fpath) as f:
|
|
8328
|
+
d = json.load(f)
|
|
8329
|
+
status = "ENABLED" if d.get("enabled") else "DISABLED"
|
|
8330
|
+
print(f"Status: {status}")
|
|
8331
|
+
print(f"Chain: {' -> '.join(d.get('chain', []))}")
|
|
8332
|
+
print(f"Current provider: {d.get('currentProvider', 'unknown')}")
|
|
8333
|
+
print(f"Primary provider: {d.get('primaryProvider', 'unknown')}")
|
|
8334
|
+
print(f"Failover count: {d.get('failoverCount', 0)}")
|
|
8335
|
+
last = d.get('lastFailover')
|
|
8336
|
+
print(f"Last failover: {last if last else 'never'}")
|
|
8337
|
+
print()
|
|
8338
|
+
print("Health Status:")
|
|
8339
|
+
health = d.get('healthCheck', {})
|
|
8340
|
+
for provider in d.get('chain', []):
|
|
8341
|
+
h = health.get(provider, 'unknown')
|
|
8342
|
+
indicator = '[OK]' if h == 'healthy' else '[--]' if h == 'unknown' else '[!!]'
|
|
8343
|
+
print(f" {indicator} {provider}: {h}")
|
|
8344
|
+
except FileNotFoundError:
|
|
8345
|
+
print("Failover not initialized. Run: loki failover --enable")
|
|
8346
|
+
except Exception as e:
|
|
8347
|
+
print(f"Error reading failover state: {e}")
|
|
8348
|
+
PYEOF
|
|
8349
|
+
return 0
|
|
8350
|
+
fi
|
|
8351
|
+
|
|
8352
|
+
while [[ $# -gt 0 ]]; do
|
|
8353
|
+
case "$1" in
|
|
8354
|
+
--enable)
|
|
8355
|
+
mkdir -p "$loki_dir/state"
|
|
8356
|
+
local current_provider
|
|
8357
|
+
current_provider=$(cat "$loki_dir/state/provider" 2>/dev/null || echo "claude")
|
|
8358
|
+
local chain="${LOKI_FAILOVER_CHAIN:-claude,codex,gemini}"
|
|
8359
|
+
|
|
8360
|
+
cat > "$failover_file" << FEOF
|
|
8361
|
+
{
|
|
8362
|
+
"enabled": true,
|
|
8363
|
+
"chain": $(printf '%s' "$chain" | python3 -c 'import sys,json; print(json.dumps(sys.stdin.read().strip().split(",")))' 2>/dev/null || echo '["claude","codex","gemini"]'),
|
|
8364
|
+
"currentProvider": "$current_provider",
|
|
8365
|
+
"primaryProvider": "$current_provider",
|
|
8366
|
+
"lastFailover": null,
|
|
8367
|
+
"failoverCount": 0,
|
|
8368
|
+
"healthCheck": {}
|
|
8369
|
+
}
|
|
8370
|
+
FEOF
|
|
8371
|
+
echo -e "${GREEN}Failover enabled${NC}"
|
|
8372
|
+
echo "Chain: $chain"
|
|
8373
|
+
echo "Primary: $current_provider"
|
|
8374
|
+
shift
|
|
8375
|
+
;;
|
|
8376
|
+
--disable)
|
|
8377
|
+
if [ -f "$failover_file" ]; then
|
|
8378
|
+
python3 -c "
|
|
8379
|
+
import json
|
|
8380
|
+
with open('$failover_file') as f: d = json.load(f)
|
|
8381
|
+
d['enabled'] = False
|
|
8382
|
+
with open('$failover_file', 'w') as f: json.dump(d, f, indent=2)
|
|
8383
|
+
" 2>/dev/null
|
|
8384
|
+
echo -e "${YELLOW}Failover disabled${NC}"
|
|
8385
|
+
else
|
|
8386
|
+
echo "Failover not initialized."
|
|
8387
|
+
fi
|
|
8388
|
+
shift
|
|
8389
|
+
;;
|
|
8390
|
+
--chain)
|
|
8391
|
+
shift
|
|
8392
|
+
local new_chain="$1"
|
|
8393
|
+
if [ -z "$new_chain" ]; then
|
|
8394
|
+
echo -e "${RED}Error: --chain requires a comma-separated list of providers${NC}"
|
|
8395
|
+
return 1
|
|
8396
|
+
fi
|
|
8397
|
+
# Validate each provider in chain
|
|
8398
|
+
local IFS=','
|
|
8399
|
+
for p in $new_chain; do
|
|
8400
|
+
case "$p" in
|
|
8401
|
+
claude|codex|gemini|cline|aider) ;;
|
|
8402
|
+
*) echo -e "${RED}Error: invalid provider '$p' in chain${NC}"; return 1 ;;
|
|
8403
|
+
esac
|
|
8404
|
+
done
|
|
8405
|
+
unset IFS
|
|
8406
|
+
|
|
8407
|
+
if [ ! -f "$failover_file" ]; then
|
|
8408
|
+
echo -e "${RED}Error: failover not initialized. Run: loki failover --enable${NC}"
|
|
8409
|
+
return 1
|
|
8410
|
+
fi
|
|
8411
|
+
|
|
8412
|
+
python3 -c "
|
|
8413
|
+
import json
|
|
8414
|
+
with open('$failover_file') as f: d = json.load(f)
|
|
8415
|
+
d['chain'] = '$new_chain'.split(',')
|
|
8416
|
+
with open('$failover_file', 'w') as f: json.dump(d, f, indent=2)
|
|
8417
|
+
" 2>/dev/null
|
|
8418
|
+
echo "Failover chain updated: $new_chain"
|
|
8419
|
+
shift
|
|
8420
|
+
;;
|
|
8421
|
+
--test)
|
|
8422
|
+
echo -e "${BOLD}Testing all providers in failover chain...${NC}"
|
|
8423
|
+
echo ""
|
|
8424
|
+
# Source provider loader for health checks
|
|
8425
|
+
if [ -f "$script_dir/../providers/loader.sh" ]; then
|
|
8426
|
+
source "$script_dir/../providers/loader.sh"
|
|
8427
|
+
fi
|
|
8428
|
+
|
|
8429
|
+
local chain_providers="claude,codex,gemini"
|
|
8430
|
+
if [ -f "$failover_file" ]; then
|
|
8431
|
+
chain_providers=$(python3 -c "import json; print(','.join(json.load(open('$failover_file')).get('chain', ['claude','codex','gemini'])))" 2>/dev/null || echo "claude,codex,gemini")
|
|
8432
|
+
fi
|
|
8433
|
+
|
|
8434
|
+
local IFS=','
|
|
8435
|
+
local all_pass=true
|
|
8436
|
+
for p in $chain_providers; do
|
|
8437
|
+
printf " %-10s " "$p:"
|
|
8438
|
+
|
|
8439
|
+
# Check CLI installed
|
|
8440
|
+
local cli_name="$p"
|
|
8441
|
+
if command -v "$cli_name" &>/dev/null; then
|
|
8442
|
+
local cli_version
|
|
8443
|
+
cli_version=$("$cli_name" --version 2>/dev/null | head -1 || echo "unknown")
|
|
8444
|
+
printf "CLI %-20s " "[$cli_version]"
|
|
8445
|
+
else
|
|
8446
|
+
printf "CLI %-20s " "[NOT INSTALLED]"
|
|
8447
|
+
echo -e "${RED}FAIL${NC}"
|
|
8448
|
+
all_pass=false
|
|
8449
|
+
continue
|
|
8450
|
+
fi
|
|
8451
|
+
|
|
8452
|
+
# Check API key
|
|
8453
|
+
local has_key=false
|
|
8454
|
+
case "$p" in
|
|
8455
|
+
claude) [ -n "${ANTHROPIC_API_KEY:-}" ] && has_key=true ;;
|
|
8456
|
+
codex) [ -n "${OPENAI_API_KEY:-}" ] && has_key=true ;;
|
|
8457
|
+
gemini) [ -n "${GOOGLE_API_KEY:-${GEMINI_API_KEY:-}}" ] && has_key=true ;;
|
|
8458
|
+
cline|aider) has_key=true ;; # Key check varies
|
|
8459
|
+
esac
|
|
8460
|
+
|
|
8461
|
+
if [ "$has_key" = "true" ]; then
|
|
8462
|
+
printf "Key [OK] "
|
|
8463
|
+
echo -e "${GREEN}PASS${NC}"
|
|
8464
|
+
else
|
|
8465
|
+
printf "Key [MISSING] "
|
|
8466
|
+
echo -e "${RED}FAIL${NC}"
|
|
8467
|
+
all_pass=false
|
|
8468
|
+
fi
|
|
8469
|
+
done
|
|
8470
|
+
unset IFS
|
|
8471
|
+
|
|
8472
|
+
echo ""
|
|
8473
|
+
if [ "$all_pass" = "true" ]; then
|
|
8474
|
+
echo -e "${GREEN}All providers healthy${NC}"
|
|
8475
|
+
else
|
|
8476
|
+
echo -e "${YELLOW}Some providers unavailable - failover chain may be limited${NC}"
|
|
8477
|
+
fi
|
|
8478
|
+
shift
|
|
8479
|
+
;;
|
|
8480
|
+
--reset)
|
|
8481
|
+
if [ -f "$failover_file" ]; then
|
|
8482
|
+
rm -f "$failover_file"
|
|
8483
|
+
echo "Failover state reset to defaults."
|
|
8484
|
+
else
|
|
8485
|
+
echo "No failover state to reset."
|
|
8486
|
+
fi
|
|
8487
|
+
shift
|
|
8488
|
+
;;
|
|
8489
|
+
--help|-h)
|
|
8490
|
+
echo -e "${BOLD}loki failover${NC} - Cross-provider auto-failover management (v6.19.0)"
|
|
8491
|
+
echo ""
|
|
8492
|
+
echo "Usage: loki failover [options]"
|
|
8493
|
+
echo ""
|
|
8494
|
+
echo "Options:"
|
|
8495
|
+
echo " (no args) Show failover status and health"
|
|
8496
|
+
echo " --enable Enable auto-failover"
|
|
8497
|
+
echo " --disable Disable auto-failover"
|
|
8498
|
+
echo " --chain X,Y,Z Set failover chain (e.g., claude,codex,gemini)"
|
|
8499
|
+
echo " --test Test all providers in chain"
|
|
8500
|
+
echo " --reset Reset failover state to defaults"
|
|
8501
|
+
echo " --help, -h Show this help"
|
|
8502
|
+
echo ""
|
|
8503
|
+
echo "Environment:"
|
|
8504
|
+
echo " LOKI_FAILOVER=true Enable failover at startup"
|
|
8505
|
+
echo " LOKI_FAILOVER_CHAIN=X,Y,Z Set default chain"
|
|
8506
|
+
echo ""
|
|
8507
|
+
echo "When a rate limit (429/529) is detected, Loki automatically switches"
|
|
8508
|
+
echo "to the next healthy provider in the chain. After each successful"
|
|
8509
|
+
echo "iteration on a fallback provider, the primary is health-checked and"
|
|
8510
|
+
echo "execution switches back when it recovers."
|
|
8511
|
+
return 0
|
|
8512
|
+
;;
|
|
8513
|
+
*)
|
|
8514
|
+
echo -e "${RED}Unknown option: $1${NC}"
|
|
8515
|
+
echo "Run 'loki failover --help' for usage."
|
|
8516
|
+
return 1
|
|
8517
|
+
;;
|
|
8518
|
+
esac
|
|
8519
|
+
done
|
|
8520
|
+
}
|
|
8521
|
+
|
|
8522
|
+
# Dry-run PRD analysis and cost estimation (v6.18.0)
|
|
8523
|
+
cmd_plan() {
|
|
8524
|
+
local prd_file=""
|
|
8525
|
+
local show_json=false
|
|
8526
|
+
local show_verbose=false
|
|
8527
|
+
|
|
8528
|
+
while [[ $# -gt 0 ]]; do
|
|
8529
|
+
case "$1" in
|
|
8530
|
+
--help|-h)
|
|
8531
|
+
echo -e "${BOLD}loki plan${NC} - Dry-run PRD analysis and cost estimation"
|
|
8532
|
+
echo ""
|
|
8533
|
+
echo "Usage: loki plan <PRD> [options]"
|
|
8534
|
+
echo ""
|
|
8535
|
+
echo "Analyzes a PRD file without executing anything. Outputs complexity,"
|
|
8536
|
+
echo "estimated iterations, token usage, cost, and execution plan."
|
|
8537
|
+
echo ""
|
|
8538
|
+
echo "Options:"
|
|
8539
|
+
echo " --json Machine-readable JSON output"
|
|
8540
|
+
echo " --verbose Show detailed per-iteration breakdown"
|
|
8541
|
+
echo " --help, -h Show this help"
|
|
8542
|
+
echo ""
|
|
8543
|
+
echo "Examples:"
|
|
8544
|
+
echo " loki plan ./prd.md"
|
|
8545
|
+
echo " loki plan ./prd.md --json"
|
|
8546
|
+
echo " loki plan ./prd.md --verbose"
|
|
8547
|
+
return 0
|
|
8548
|
+
;;
|
|
8549
|
+
--json) show_json=true; shift ;;
|
|
8550
|
+
--verbose) show_verbose=true; shift ;;
|
|
8551
|
+
*)
|
|
8552
|
+
if [ -z "$prd_file" ]; then
|
|
8553
|
+
prd_file="$1"
|
|
8554
|
+
fi
|
|
8555
|
+
shift
|
|
8556
|
+
;;
|
|
8557
|
+
esac
|
|
8558
|
+
done
|
|
8559
|
+
|
|
8560
|
+
if [ -z "$prd_file" ]; then
|
|
8561
|
+
echo -e "${RED}Usage: loki plan <PRD file>${NC}"
|
|
8562
|
+
echo "Run 'loki plan --help' for usage."
|
|
8563
|
+
return 1
|
|
8564
|
+
fi
|
|
8565
|
+
|
|
8566
|
+
if [ ! -f "$prd_file" ]; then
|
|
8567
|
+
echo -e "${RED}PRD file not found: $prd_file${NC}"
|
|
8568
|
+
return 1
|
|
8569
|
+
fi
|
|
8570
|
+
|
|
8571
|
+
local prd_path
|
|
8572
|
+
prd_path="$(cd "$(dirname "$prd_file")" && pwd)/$(basename "$prd_file")"
|
|
8573
|
+
|
|
8574
|
+
python3 -c "
|
|
8575
|
+
import json, sys, os, re, math
|
|
8576
|
+
|
|
8577
|
+
prd_path = sys.argv[1]
|
|
8578
|
+
show_json = sys.argv[2] == 'true'
|
|
8579
|
+
show_verbose = sys.argv[3] == 'true'
|
|
8580
|
+
|
|
8581
|
+
# Colors (disabled for JSON mode)
|
|
8582
|
+
if show_json:
|
|
8583
|
+
RED = GREEN = YELLOW = BLUE = CYAN = BOLD = DIM = NC = ''
|
|
8584
|
+
else:
|
|
8585
|
+
RED = '\033[0;31m'
|
|
8586
|
+
GREEN = '\033[0;32m'
|
|
8587
|
+
YELLOW = '\033[1;33m'
|
|
8588
|
+
BLUE = '\033[0;34m'
|
|
8589
|
+
CYAN = '\033[0;36m'
|
|
8590
|
+
BOLD = '\033[1m'
|
|
8591
|
+
DIM = '\033[2m'
|
|
8592
|
+
NC = '\033[0m'
|
|
8593
|
+
|
|
8594
|
+
# --- Read and analyze PRD ---
|
|
8595
|
+
with open(prd_path, 'r') as f:
|
|
8596
|
+
content = f.read()
|
|
8597
|
+
|
|
8598
|
+
prd_words = len(content.split())
|
|
8599
|
+
prd_lines = content.count('\n') + 1
|
|
8600
|
+
|
|
8601
|
+
# Detect PRD format
|
|
8602
|
+
is_json_prd = prd_path.endswith('.json')
|
|
8603
|
+
sections = []
|
|
8604
|
+
features = []
|
|
8605
|
+
endpoints = []
|
|
8606
|
+
integrations = []
|
|
8607
|
+
databases = []
|
|
8608
|
+
ui_components = []
|
|
8609
|
+
|
|
8610
|
+
if is_json_prd:
|
|
8611
|
+
try:
|
|
8612
|
+
prd_data = json.loads(content)
|
|
8613
|
+
features = prd_data.get('features', prd_data.get('requirements', []))
|
|
8614
|
+
if isinstance(features, list):
|
|
8615
|
+
features = [f.get('title', f.get('name', str(f))) if isinstance(f, dict) else str(f) for f in features]
|
|
8616
|
+
else:
|
|
8617
|
+
features = []
|
|
8618
|
+
except Exception:
|
|
8619
|
+
features = []
|
|
8620
|
+
else:
|
|
8621
|
+
# Markdown analysis
|
|
8622
|
+
for line in content.split('\n'):
|
|
8623
|
+
stripped = line.strip()
|
|
8624
|
+
if stripped.startswith('## '):
|
|
8625
|
+
sections.append(stripped[3:].strip())
|
|
8626
|
+
elif stripped.startswith('### '):
|
|
8627
|
+
sections.append(stripped[4:].strip())
|
|
8628
|
+
|
|
8629
|
+
# Count features: headers + checkboxes
|
|
8630
|
+
feature_patterns = re.findall(r'^(?:##?\s+|[-*]\s+\[.\]\s+)(.+)', content, re.MULTILINE)
|
|
8631
|
+
features = feature_patterns
|
|
8632
|
+
|
|
8633
|
+
# Count API endpoints
|
|
8634
|
+
endpoint_patterns = re.findall(
|
|
8635
|
+
r'(?:GET|POST|PUT|PATCH|DELETE|HEAD|OPTIONS)\s+[/\w{}:.-]+',
|
|
8636
|
+
content, re.IGNORECASE
|
|
8637
|
+
)
|
|
8638
|
+
endpoints = endpoint_patterns
|
|
8639
|
+
|
|
8640
|
+
# Count external integrations
|
|
8641
|
+
integration_keywords = [
|
|
8642
|
+
'oauth', 'saml', 'oidc', 'stripe', 'twilio', 'sendgrid',
|
|
8643
|
+
'aws', 'gcp', 'azure', 'firebase', 's3', 'redis', 'kafka',
|
|
8644
|
+
'elasticsearch', 'rabbitmq', 'webhook', 'socket.io', 'websocket',
|
|
8645
|
+
'graphql', 'grpc', 'docker', 'kubernetes', 'k8s'
|
|
8646
|
+
]
|
|
8647
|
+
content_lower = content.lower()
|
|
8648
|
+
for kw in integration_keywords:
|
|
8649
|
+
if kw in content_lower:
|
|
8650
|
+
integrations.append(kw)
|
|
8651
|
+
|
|
8652
|
+
# Count database mentions
|
|
8653
|
+
db_keywords = ['postgresql', 'postgres', 'mysql', 'mongodb', 'sqlite',
|
|
8654
|
+
'dynamodb', 'cassandra', 'redis', 'prisma', 'typeorm',
|
|
8655
|
+
'sequelize', 'drizzle', 'knex', 'database', 'migration']
|
|
8656
|
+
for kw in db_keywords:
|
|
8657
|
+
if kw in content_lower:
|
|
8658
|
+
databases.append(kw)
|
|
8659
|
+
|
|
8660
|
+
# Count UI components
|
|
8661
|
+
ui_keywords = ['dashboard', 'form', 'table', 'modal', 'navbar',
|
|
8662
|
+
'sidebar', 'chart', 'graph', 'calendar', 'kanban',
|
|
8663
|
+
'drag.and.drop', 'responsive', 'mobile', 'animation']
|
|
8664
|
+
for kw in ui_keywords:
|
|
8665
|
+
if re.search(kw, content_lower):
|
|
8666
|
+
ui_components.append(kw)
|
|
8667
|
+
|
|
8668
|
+
feature_count = len(features)
|
|
8669
|
+
endpoint_count = len(endpoints)
|
|
8670
|
+
integration_count = len(integrations)
|
|
8671
|
+
db_count = len(databases)
|
|
8672
|
+
ui_count = len(ui_components)
|
|
8673
|
+
section_count = len(sections)
|
|
8674
|
+
|
|
8675
|
+
# --- Determine complexity ---
|
|
8676
|
+
complexity_score = 0
|
|
8677
|
+
complexity_reasons = []
|
|
8678
|
+
|
|
8679
|
+
if prd_words < 200 and feature_count < 5:
|
|
8680
|
+
complexity = 'simple'
|
|
8681
|
+
complexity_reasons.append('Short PRD (<200 words, <5 features)')
|
|
8682
|
+
elif prd_words > 3000 or feature_count > 30:
|
|
8683
|
+
complexity_score += 3
|
|
8684
|
+
complexity_reasons.append(f'Large PRD ({prd_words} words, {feature_count} features)')
|
|
8685
|
+
elif prd_words > 1000 or feature_count > 15:
|
|
8686
|
+
complexity_score += 2
|
|
8687
|
+
complexity_reasons.append(f'Medium PRD ({prd_words} words, {feature_count} features)')
|
|
8688
|
+
else:
|
|
8689
|
+
complexity_score += 1
|
|
8690
|
+
complexity_reasons.append(f'Standard PRD ({prd_words} words, {feature_count} features)')
|
|
8691
|
+
|
|
8692
|
+
if endpoint_count > 20:
|
|
8693
|
+
complexity_score += 2
|
|
8694
|
+
complexity_reasons.append(f'{endpoint_count} API endpoints detected')
|
|
8695
|
+
elif endpoint_count > 5:
|
|
8696
|
+
complexity_score += 1
|
|
8697
|
+
complexity_reasons.append(f'{endpoint_count} API endpoints detected')
|
|
8698
|
+
|
|
8699
|
+
if integration_count > 3:
|
|
8700
|
+
complexity_score += 2
|
|
8701
|
+
complexity_reasons.append(str(integration_count) + ' external integrations: ' + ', '.join(integrations[:5]))
|
|
8702
|
+
elif integration_count > 0:
|
|
8703
|
+
complexity_score += 1
|
|
8704
|
+
complexity_reasons.append(str(integration_count) + ' external integrations: ' + ', '.join(integrations))
|
|
8705
|
+
|
|
8706
|
+
if db_count > 1:
|
|
8707
|
+
complexity_score += 1
|
|
8708
|
+
complexity_reasons.append('Multiple data stores: ' + ', '.join(databases[:4]))
|
|
8709
|
+
|
|
8710
|
+
if ui_count > 5:
|
|
8711
|
+
complexity_score += 1
|
|
8712
|
+
complexity_reasons.append(f'{ui_count} UI components detected')
|
|
8713
|
+
|
|
8714
|
+
# Map score to tier
|
|
8715
|
+
if complexity_score <= 1:
|
|
8716
|
+
complexity = 'simple'
|
|
8717
|
+
elif complexity_score <= 3:
|
|
8718
|
+
complexity = 'moderate'
|
|
8719
|
+
elif complexity_score <= 5:
|
|
8720
|
+
complexity = 'complex'
|
|
8721
|
+
else:
|
|
8722
|
+
complexity = 'enterprise'
|
|
8723
|
+
|
|
8724
|
+
# --- Estimate iterations ---
|
|
8725
|
+
iteration_map = {
|
|
8726
|
+
'simple': (3, 5),
|
|
8727
|
+
'moderate': (6, 12),
|
|
8728
|
+
'complex': (12, 24),
|
|
8729
|
+
'enterprise': (24, 48),
|
|
8730
|
+
}
|
|
8731
|
+
min_iter, max_iter = iteration_map[complexity]
|
|
8732
|
+
|
|
8733
|
+
# Adjust based on specific counts
|
|
8734
|
+
if endpoint_count > 10:
|
|
8735
|
+
max_iter = max(max_iter, max_iter + endpoint_count // 5)
|
|
8736
|
+
if integration_count > 2:
|
|
8737
|
+
max_iter += integration_count
|
|
8738
|
+
|
|
8739
|
+
estimated_iterations = (min_iter + max_iter) // 2
|
|
8740
|
+
|
|
8741
|
+
# --- Token estimation per iteration by RARV tier ---
|
|
8742
|
+
# RARV cycle: 4 iterations per cycle (plan, develop, develop, test)
|
|
8743
|
+
# Tokens estimated per iteration step
|
|
8744
|
+
tokens_per_tier = {
|
|
8745
|
+
'planning': {'input': 50000, 'output': 8000, 'model': 'Opus'},
|
|
8746
|
+
'development': {'input': 80000, 'output': 15000, 'model': 'Sonnet'},
|
|
8747
|
+
'fast': {'input': 30000, 'output': 5000, 'model': 'Haiku'},
|
|
8748
|
+
}
|
|
8749
|
+
|
|
8750
|
+
# Pricing per 1M tokens
|
|
8751
|
+
pricing = {
|
|
8752
|
+
'Opus': {'input': 15.00, 'output': 75.00},
|
|
8753
|
+
'Sonnet': {'input': 3.00, 'output': 15.00},
|
|
8754
|
+
'Haiku': {'input': 0.25, 'output': 1.25},
|
|
8755
|
+
}
|
|
8756
|
+
|
|
8757
|
+
# Build per-iteration plan
|
|
8758
|
+
iteration_plan = []
|
|
8759
|
+
total_input_tokens = 0
|
|
8760
|
+
total_output_tokens = 0
|
|
8761
|
+
total_cost = 0.0
|
|
8762
|
+
tier_totals = {'Opus': 0.0, 'Sonnet': 0.0, 'Haiku': 0.0}
|
|
8763
|
+
tier_iterations = {'Opus': 0, 'Sonnet': 0, 'Haiku': 0}
|
|
8764
|
+
|
|
8765
|
+
for i in range(estimated_iterations):
|
|
8766
|
+
rarv_step = i % 4
|
|
8767
|
+
if rarv_step == 0:
|
|
8768
|
+
tier = 'planning'
|
|
8769
|
+
phase_label = 'Reason (Planning)'
|
|
8770
|
+
elif rarv_step == 1:
|
|
8771
|
+
tier = 'development'
|
|
8772
|
+
phase_label = 'Act (Implementation)'
|
|
8773
|
+
elif rarv_step == 2:
|
|
8774
|
+
tier = 'development'
|
|
8775
|
+
phase_label = 'Reflect (Review)'
|
|
8776
|
+
else:
|
|
8777
|
+
tier = 'fast'
|
|
8778
|
+
phase_label = 'Verify (Testing)'
|
|
8779
|
+
|
|
8780
|
+
info = tokens_per_tier[tier]
|
|
8781
|
+
model = info['model']
|
|
8782
|
+
inp = info['input']
|
|
8783
|
+
out = info['output']
|
|
8784
|
+
|
|
8785
|
+
cost = (inp / 1_000_000) * pricing[model]['input'] + \
|
|
8786
|
+
(out / 1_000_000) * pricing[model]['output']
|
|
8787
|
+
|
|
8788
|
+
total_input_tokens += inp
|
|
8789
|
+
total_output_tokens += out
|
|
8790
|
+
total_cost += cost
|
|
8791
|
+
tier_totals[model] += cost
|
|
8792
|
+
tier_iterations[model] += 1
|
|
8793
|
+
|
|
8794
|
+
iteration_plan.append({
|
|
8795
|
+
'iteration': i + 1,
|
|
8796
|
+
'phase': phase_label,
|
|
8797
|
+
'model': model,
|
|
8798
|
+
'input_tokens': inp,
|
|
8799
|
+
'output_tokens': out,
|
|
8800
|
+
'cost_usd': round(cost, 4),
|
|
8801
|
+
})
|
|
8802
|
+
|
|
8803
|
+
# --- Execution plan (what each cycle tackles) ---
|
|
8804
|
+
execution_phases = []
|
|
8805
|
+
cycle_count = math.ceil(estimated_iterations / 4)
|
|
8806
|
+
phase_names = []
|
|
8807
|
+
|
|
8808
|
+
# Generate phase names based on PRD content
|
|
8809
|
+
if sections:
|
|
8810
|
+
phase_names = sections[:cycle_count]
|
|
8811
|
+
else:
|
|
8812
|
+
# Generic phases based on complexity
|
|
8813
|
+
generic = [
|
|
8814
|
+
'Project setup and scaffolding',
|
|
8815
|
+
'Core data models and database schema',
|
|
8816
|
+
'API endpoints and business logic',
|
|
8817
|
+
'Frontend components and pages',
|
|
8818
|
+
'Authentication and authorization',
|
|
8819
|
+
'External integrations',
|
|
8820
|
+
'Testing and quality assurance',
|
|
8821
|
+
'Performance optimization',
|
|
8822
|
+
'Documentation and deployment',
|
|
8823
|
+
'Edge cases and error handling',
|
|
8824
|
+
'Security hardening',
|
|
8825
|
+
'Final polish and review',
|
|
8826
|
+
]
|
|
8827
|
+
phase_names = generic[:cycle_count]
|
|
8828
|
+
|
|
8829
|
+
for idx, name in enumerate(phase_names):
|
|
8830
|
+
execution_phases.append({
|
|
8831
|
+
'cycle': idx + 1,
|
|
8832
|
+
'iterations': f'{idx * 4 + 1}-{min((idx + 1) * 4, estimated_iterations)}',
|
|
8833
|
+
'focus': name,
|
|
8834
|
+
})
|
|
8835
|
+
|
|
8836
|
+
# --- Time estimation ---
|
|
8837
|
+
# Average ~3.5 minutes per iteration (API calls + processing)
|
|
8838
|
+
minutes_per_iteration = 3.5
|
|
8839
|
+
total_minutes = estimated_iterations * minutes_per_iteration
|
|
8840
|
+
if total_minutes < 60:
|
|
8841
|
+
time_estimate = f'{int(total_minutes)} minutes'
|
|
8842
|
+
elif total_minutes < 120:
|
|
8843
|
+
time_estimate = f'{int(total_minutes // 60)} hour {int(total_minutes % 60)} minutes'
|
|
8844
|
+
else:
|
|
8845
|
+
time_estimate = f'{total_minutes / 60:.1f} hours'
|
|
8846
|
+
|
|
8847
|
+
min_time = min_iter * minutes_per_iteration
|
|
8848
|
+
max_time = max_iter * minutes_per_iteration
|
|
8849
|
+
|
|
8850
|
+
# --- Quality gates ---
|
|
8851
|
+
quality_gates = [
|
|
8852
|
+
{'gate': 'Static Analysis', 'trigger': 'Every iteration', 'applies': True},
|
|
8853
|
+
{'gate': 'Unit Tests', 'trigger': 'Verify phase (every 4th iteration)', 'applies': True},
|
|
8854
|
+
{'gate': 'Code Review (3-reviewer)', 'trigger': 'After implementation cycles', 'applies': complexity in ('complex', 'enterprise')},
|
|
8855
|
+
{'gate': 'Anti-sycophancy Check', 'trigger': 'On unanimous review approval', 'applies': complexity in ('complex', 'enterprise')},
|
|
8856
|
+
{'gate': 'Integration Tests', 'trigger': 'Mid-project and final', 'applies': integration_count > 0 or endpoint_count > 3},
|
|
8857
|
+
{'gate': 'Coverage Gate (>80%)', 'trigger': 'Final verification', 'applies': True},
|
|
8858
|
+
{'gate': 'Security Scan', 'trigger': 'Before completion', 'applies': integration_count > 0},
|
|
8859
|
+
{'gate': 'Performance Benchmark', 'trigger': 'Before completion', 'applies': complexity in ('complex', 'enterprise')},
|
|
8860
|
+
{'gate': 'Completion Council', 'trigger': 'After all phases', 'applies': True},
|
|
8861
|
+
]
|
|
8862
|
+
active_gates = [g for g in quality_gates if g['applies']]
|
|
8863
|
+
|
|
8864
|
+
# --- Provider recommendation ---
|
|
8865
|
+
if integration_count > 3 or complexity in ('complex', 'enterprise'):
|
|
8866
|
+
recommended_provider = 'Claude'
|
|
8867
|
+
provider_reason = 'Full feature support needed (subagents, parallel, MCP)'
|
|
8868
|
+
elif complexity == 'simple' and endpoint_count == 0:
|
|
8869
|
+
recommended_provider = 'Any (Claude/Codex/Gemini)'
|
|
8870
|
+
provider_reason = 'Simple project works with all providers'
|
|
8871
|
+
else:
|
|
8872
|
+
recommended_provider = 'Claude'
|
|
8873
|
+
provider_reason = 'Best iteration quality and tool support'
|
|
8874
|
+
|
|
8875
|
+
# --- Output ---
|
|
8876
|
+
if show_json:
|
|
8877
|
+
result = {
|
|
8878
|
+
'prd_file': prd_path,
|
|
8879
|
+
'prd_stats': {
|
|
8880
|
+
'words': prd_words,
|
|
8881
|
+
'lines': prd_lines,
|
|
8882
|
+
'sections': section_count,
|
|
8883
|
+
'features': feature_count,
|
|
8884
|
+
'endpoints': endpoint_count,
|
|
8885
|
+
'integrations': integrations,
|
|
8886
|
+
'databases': databases,
|
|
8887
|
+
'ui_components': ui_count,
|
|
8888
|
+
},
|
|
8889
|
+
'complexity': {
|
|
8890
|
+
'tier': complexity,
|
|
8891
|
+
'score': complexity_score,
|
|
8892
|
+
'reasons': complexity_reasons,
|
|
8893
|
+
},
|
|
8894
|
+
'iterations': {
|
|
8895
|
+
'estimated': estimated_iterations,
|
|
8896
|
+
'range': [min_iter, max_iter],
|
|
8897
|
+
'rarv_cycles': cycle_count,
|
|
8898
|
+
},
|
|
8899
|
+
'tokens': {
|
|
8900
|
+
'total_input': total_input_tokens,
|
|
8901
|
+
'total_output': total_output_tokens,
|
|
8902
|
+
'total': total_input_tokens + total_output_tokens,
|
|
8903
|
+
},
|
|
8904
|
+
'cost': {
|
|
8905
|
+
'total_usd': round(total_cost, 2),
|
|
8906
|
+
'by_model': {k: round(v, 2) for k, v in tier_totals.items()},
|
|
8907
|
+
'iterations_by_model': tier_iterations,
|
|
8908
|
+
},
|
|
8909
|
+
'time': {
|
|
8910
|
+
'estimated': time_estimate,
|
|
8911
|
+
'minutes': round(total_minutes, 1),
|
|
8912
|
+
'range_minutes': [round(min_time, 1), round(max_time, 1)],
|
|
8913
|
+
},
|
|
8914
|
+
'execution_plan': execution_phases,
|
|
8915
|
+
'quality_gates': [g['gate'] for g in active_gates],
|
|
8916
|
+
'provider': {
|
|
8917
|
+
'recommended': recommended_provider,
|
|
8918
|
+
'reason': provider_reason,
|
|
8919
|
+
},
|
|
8920
|
+
}
|
|
8921
|
+
if show_verbose:
|
|
8922
|
+
result['iteration_details'] = iteration_plan
|
|
8923
|
+
print(json.dumps(result, indent=2))
|
|
8924
|
+
sys.exit(0)
|
|
8925
|
+
|
|
8926
|
+
# --- Formatted output ---
|
|
8927
|
+
print()
|
|
8928
|
+
print(f'{BOLD}PRD Analysis: {os.path.basename(prd_path)}{NC}')
|
|
8929
|
+
print(f'{DIM}' + '=' * 60 + f'{NC}')
|
|
8930
|
+
|
|
8931
|
+
# PRD Stats
|
|
8932
|
+
print(f'\n{CYAN}PRD Statistics{NC}')
|
|
8933
|
+
print(f' Words: {prd_words} | Lines: {prd_lines} | Sections: {section_count}')
|
|
8934
|
+
print(f' Features: {feature_count} | Endpoints: {endpoint_count} | Integrations: {integration_count}')
|
|
8935
|
+
if databases:
|
|
8936
|
+
print(' Data stores: ' + ', '.join(databases[:4]))
|
|
8937
|
+
if ui_count > 0:
|
|
8938
|
+
print(f' UI components: {ui_count}')
|
|
8939
|
+
|
|
8940
|
+
# Complexity
|
|
8941
|
+
color = {'simple': GREEN, 'moderate': YELLOW, 'complex': RED, 'enterprise': RED}
|
|
8942
|
+
cx_color = color.get(complexity, NC)
|
|
8943
|
+
print(f'\n{CYAN}Complexity{NC}')
|
|
8944
|
+
print(f' Tier: {cx_color}{BOLD}{complexity.upper()}{NC} (score: {complexity_score})')
|
|
8945
|
+
for reason in complexity_reasons:
|
|
8946
|
+
print(f' {DIM}- {reason}{NC}')
|
|
8947
|
+
|
|
8948
|
+
# Iterations
|
|
8949
|
+
print(f'\n{CYAN}Estimated Iterations{NC}')
|
|
8950
|
+
print(f' Count: {BOLD}{estimated_iterations}{NC} (range: {min_iter}-{max_iter})')
|
|
8951
|
+
print(f' RARV cycles: {cycle_count} (4 iterations per cycle)')
|
|
8952
|
+
opus_n = tier_iterations.get('Opus', 0)
|
|
8953
|
+
sonnet_n = tier_iterations.get('Sonnet', 0)
|
|
8954
|
+
haiku_n = tier_iterations.get('Haiku', 0)
|
|
8955
|
+
print(f' Model distribution: Opus x{opus_n} | Sonnet x{sonnet_n} | Haiku x{haiku_n}')
|
|
8956
|
+
|
|
8957
|
+
# Tokens
|
|
8958
|
+
total_tok = total_input_tokens + total_output_tokens
|
|
8959
|
+
print(f'\n{CYAN}Token Usage Estimate{NC}')
|
|
8960
|
+
print(f' Input: {total_input_tokens:>12,} tokens')
|
|
8961
|
+
print(f' Output: {total_output_tokens:>12,} tokens')
|
|
8962
|
+
print(f' Total: {total_tok:>12,} tokens')
|
|
8963
|
+
|
|
8964
|
+
# Cost
|
|
8965
|
+
ds = chr(36) # dollar sign
|
|
8966
|
+
print(f'\n{CYAN}Cost Estimate{NC}')
|
|
8967
|
+
print(f' {BOLD}Total: {ds}{total_cost:.2f}{NC}')
|
|
8968
|
+
for model in ['Opus', 'Sonnet', 'Haiku']:
|
|
8969
|
+
pct = (tier_totals[model] / total_cost * 100) if total_cost > 0 else 0
|
|
8970
|
+
bar_len = int(pct / 5)
|
|
8971
|
+
bar = '#' * bar_len + '.' * (20 - bar_len)
|
|
8972
|
+
mc = tier_totals[model]
|
|
8973
|
+
print(' {:>6}: {}{:>7.2f} ({:4.1f}%) [{}]'.format(model, ds, mc, pct, bar))
|
|
8974
|
+
|
|
8975
|
+
# Time
|
|
8976
|
+
print(f'\n{CYAN}Time Estimate{NC}')
|
|
8977
|
+
print(f' Estimated: {BOLD}{time_estimate}{NC}')
|
|
8978
|
+
min_t_str = f'{int(min_time)} min' if min_time < 60 else f'{min_time/60:.1f} hr'
|
|
8979
|
+
max_t_str = f'{int(max_time)} min' if max_time < 60 else f'{max_time/60:.1f} hr'
|
|
8980
|
+
print(f' Range: {min_t_str} - {max_t_str}')
|
|
8981
|
+
|
|
8982
|
+
# Execution plan
|
|
8983
|
+
print(f'\n{CYAN}Execution Plan{NC}')
|
|
8984
|
+
for phase in execution_phases:
|
|
8985
|
+
c_num = phase.get('cycle', '')
|
|
8986
|
+
c_iters = phase.get('iterations', '')
|
|
8987
|
+
c_focus = phase.get('focus', '')
|
|
8988
|
+
print(f' {BOLD}Cycle {c_num}{NC} (iterations {c_iters}): {c_focus}')
|
|
8989
|
+
|
|
8990
|
+
# Quality gates
|
|
8991
|
+
print(f'\n{CYAN}Quality Gates ({len(active_gates)} active){NC}')
|
|
8992
|
+
for g in active_gates:
|
|
8993
|
+
g_name = g.get('gate', '')
|
|
8994
|
+
g_trig = g.get('trigger', '')
|
|
8995
|
+
print(f' {GREEN}[*]{NC} {g_name} -- {DIM}{g_trig}{NC}')
|
|
8996
|
+
|
|
8997
|
+
# Provider recommendation
|
|
8998
|
+
print(f'\n{CYAN}Recommended Provider{NC}')
|
|
8999
|
+
print(f' {BOLD}{recommended_provider}{NC}')
|
|
9000
|
+
print(f' {DIM}{provider_reason}{NC}')
|
|
9001
|
+
|
|
9002
|
+
# Verbose: per-iteration breakdown
|
|
9003
|
+
if show_verbose:
|
|
9004
|
+
print(f'\n{CYAN}Per-Iteration Breakdown{NC}')
|
|
9005
|
+
hdr = ' ' + DIM + '{:>4} {:<24} {:<7} {:>8} {:>8} {:>8}'.format('Iter', 'Phase', 'Model', 'Input', 'Output', 'Cost') + NC
|
|
9006
|
+
print(hdr)
|
|
9007
|
+
print(' ' + DIM + '-' * 65 + NC)
|
|
9008
|
+
for it in iteration_plan:
|
|
9009
|
+
i_num = it.get('iteration', 0)
|
|
9010
|
+
i_phase = it.get('phase', '')
|
|
9011
|
+
i_model = it.get('model', '')
|
|
9012
|
+
i_inp = it.get('input_tokens', 0)
|
|
9013
|
+
i_out = it.get('output_tokens', 0)
|
|
9014
|
+
i_cost = it.get('cost_usd', 0)
|
|
9015
|
+
print(' {:>4} {:<24} {:<7} {:>7,} {:>7,} {}{:>6.2f}'.format(i_num, i_phase, i_model, i_inp, i_out, ds, i_cost))
|
|
9016
|
+
|
|
9017
|
+
print(f'\n{DIM}This is an estimate. Actual usage depends on PRD complexity,')
|
|
9018
|
+
print(f'code review cycles, and test failures.{NC}')
|
|
9019
|
+
print()
|
|
9020
|
+
" "$prd_path" "$show_json" "$show_verbose"
|
|
9021
|
+
}
|
|
9022
|
+
|
|
8301
9023
|
# Main command dispatcher
|
|
8302
9024
|
main() {
|
|
8303
9025
|
if [ $# -eq 0 ]; then
|
|
@@ -8461,6 +9183,12 @@ main() {
|
|
|
8461
9183
|
trigger)
|
|
8462
9184
|
cmd_trigger "$@"
|
|
8463
9185
|
;;
|
|
9186
|
+
plan)
|
|
9187
|
+
cmd_plan "$@"
|
|
9188
|
+
;;
|
|
9189
|
+
failover)
|
|
9190
|
+
cmd_failover "$@"
|
|
9191
|
+
;;
|
|
8464
9192
|
version|--version|-v)
|
|
8465
9193
|
cmd_version
|
|
8466
9194
|
;;
|
package/autonomy/run.sh
CHANGED
|
@@ -6525,6 +6525,278 @@ calculate_wait() {
|
|
|
6525
6525
|
echo $wait_time
|
|
6526
6526
|
}
|
|
6527
6527
|
|
|
6528
|
+
#===============================================================================
|
|
6529
|
+
# Cross-Provider Auto-Failover (v6.19.0)
|
|
6530
|
+
#===============================================================================
|
|
6531
|
+
|
|
6532
|
+
# Create .loki/state/failover.json with default contents when failover is
# enabled (LOKI_FAILOVER=true) and no state file exists yet; no-op otherwise.
init_failover_state() {
    # Failover is strictly opt-in via the environment.
    [ "${LOKI_FAILOVER:-false}" = "true" ] || return 0

    local state_dir="${TARGET_DIR:-.}/.loki/state"
    local state_file="$state_dir/failover.json"

    mkdir -p "$state_dir"

    # Never clobber existing state: a prior run's provider/health info wins.
    [ -f "$state_file" ] && return 0

    local provider_chain="${LOKI_FAILOVER_CHAIN:-claude,codex,gemini}"
    local primary_provider="${PROVIDER_NAME:-claude}"
    # The chain is serialized to a JSON array via python3; on any python
    # failure we fall back to the default three-provider chain.
    cat > "$state_file" << FEOF
{
  "enabled": true,
  "chain": $(printf '%s' "$provider_chain" | python3 -c 'import sys,json; print(json.dumps(sys.stdin.read().strip().split(",")))' 2>/dev/null || echo '["claude","codex","gemini"]'),
  "currentProvider": "$primary_provider",
  "primaryProvider": "$primary_provider",
  "lastFailover": null,
  "failoverCount": 0,
  "healthCheck": {
    "$primary_provider": "healthy"
  }
}
FEOF
    log_info "Failover initialized: chain=$provider_chain, primary=$primary_provider"
}
|
|
6563
|
+
|
|
6564
|
+
# Read failover config from the state file into shell variables.
# Sets: FAILOVER_ENABLED, FAILOVER_CHAIN, FAILOVER_CURRENT, FAILOVER_PRIMARY,
#       FAILOVER_COUNT
# Returns 1 (with FAILOVER_ENABLED=false) when no state file exists.
read_failover_config() {
    local failover_file="${TARGET_DIR:-.}/.loki/state/failover.json"

    if [ ! -f "$failover_file" ]; then
        FAILOVER_ENABLED="false"
        return 1
    fi

    # Pass the resolved path as argv so Python and the shell agree on the
    # file location even when TARGET_DIR is set but not exported (the old
    # os.environ lookup silently fell back to "." in that case).
    eval "$(python3 - "$failover_file" << 'PYEOF' 2>/dev/null || echo 'FAILOVER_ENABLED=false'
import json, sys
try:
    with open(sys.argv[1]) as f:
        d = json.load(f)
    chain = ','.join(d.get('chain', ['claude','codex','gemini']))
    print(f'FAILOVER_ENABLED={str(d.get("enabled", False)).lower()}')
    print(f'FAILOVER_CHAIN="{chain}"')
    print(f'FAILOVER_CURRENT="{d.get("currentProvider", "claude")}"')
    print(f'FAILOVER_PRIMARY="{d.get("primaryProvider", "claude")}"')
    print(f'FAILOVER_COUNT={d.get("failoverCount", 0)}')
except Exception:
    print('FAILOVER_ENABLED=false')
PYEOF
)"
}
|
|
6590
|
+
|
|
6591
|
+
# Set a single top-level key in failover.json.
# $1 = key, $2 = value. The literals "null"/"true"/"false" and pure digits
# are converted to the matching JSON types; anything else is stored as a
# string. Best-effort: failures are silently ignored (failover is advisory).
update_failover_state() {
    local key="$1"
    local value="$2"
    local failover_file="${TARGET_DIR:-.}/.loki/state/failover.json"

    [ ! -f "$failover_file" ] && return 1

    # Quoted heredoc + argv keeps key/value text out of the Python source:
    # the previous unquoted interpolation broke (or injected code) whenever
    # a value contained quotes or backslashes, and relied on TARGET_DIR
    # being exported.
    python3 - "$failover_file" "$key" "$value" << 'PYEOF' 2>/dev/null || true
import json, sys
fpath, key, value = sys.argv[1], sys.argv[2], sys.argv[3]
try:
    with open(fpath) as f:
        d = json.load(f)
    # Handle type conversion for JSON scalar literals.
    if value == "null":
        d[key] = None
    elif value == "true":
        d[key] = True
    elif value == "false":
        d[key] = False
    elif value.isdigit():
        d[key] = int(value)
    else:
        d[key] = value
    with open(fpath, 'w') as f:
        json.dump(d, f, indent=2)
except Exception:
    pass
PYEOF
}
|
|
6624
|
+
|
|
6625
|
+
# Record the health status of one provider under "healthCheck" in
# failover.json. $1 = provider name, $2 = status (healthy|unhealthy|unknown).
# Best-effort: failures are silently ignored (failover is advisory).
update_failover_health() {
    local provider="$1"
    local status="$2"
    local failover_file="${TARGET_DIR:-.}/.loki/state/failover.json"

    [ ! -f "$failover_file" ] && return 1

    # Quoted heredoc + argv: provider/status text never enters the Python
    # source, so special characters cannot break or inject code (the old
    # unquoted heredoc interpolated them directly), and the file path no
    # longer depends on TARGET_DIR being exported.
    python3 - "$failover_file" "$provider" "$status" << 'PYEOF' 2>/dev/null || true
import json, sys
fpath, provider, status = sys.argv[1], sys.argv[2], sys.argv[3]
try:
    with open(fpath) as f:
        d = json.load(f)
    d.setdefault('healthCheck', {})[provider] = status
    with open(fpath, 'w') as f:
        json.dump(d, f, indent=2)
except Exception:
    pass
PYEOF
}
|
|
6648
|
+
|
|
6649
|
+
# Check provider health: the CLI binary is installed and (where one is
# required) the provider's API key env var is non-empty.
# Returns: 0 if healthy, 1 if unhealthy or unknown provider.
check_provider_health() {
    local provider="$1"
    local cli_name="" key_value=""

    # Map each known provider to its CLI binary and required credential.
    # A "-" sentinel marks providers that need no API key env var.
    case "$provider" in
        claude) cli_name="claude"; key_value="${ANTHROPIC_API_KEY:-}" ;;
        codex)  cli_name="codex";  key_value="${OPENAI_API_KEY:-}" ;;
        gemini) cli_name="gemini"; key_value="${GOOGLE_API_KEY:-${GEMINI_API_KEY:-}}" ;;
        cline)  cli_name="cline";  key_value="-" ;;
        aider)  cli_name="aider";  key_value="-" ;;
        *)      return 1 ;;
    esac

    command -v "$cli_name" &>/dev/null || return 1
    [ -n "$key_value" ] || return 1

    return 0
}
|
|
6681
|
+
|
|
6682
|
+
# Attempt failover to the next healthy provider in the configured chain.
# Called when a rate limit is detected on the current provider.
# On success: sources the new provider's config, updates failover.json and
# PROVIDER_NAME, emits a provider_failover event, and returns 0.
# Returns: 1 when failover is disabled or every other provider is unhealthy.
attempt_provider_failover() {
    read_failover_config || return 1

    if [ "$FAILOVER_ENABLED" != "true" ]; then
        return 1
    fi

    local current="${FAILOVER_CURRENT:-${PROVIDER_NAME:-claude}}"
    log_warn "Failover: rate limit on $current, checking chain: $FAILOVER_CHAIN"

    # Mark current as unhealthy
    update_failover_health "$current" "unhealthy"

    # Split the comma-separated chain into an array; this keeps the IFS
    # change scoped to the read builtin instead of leaking a function-wide
    # IFS=',' into everything after the loop (the previous version also
    # declared a never-used tried_wrap variable, removed here).
    local -a chain_providers
    IFS=',' read -r -a chain_providers <<< "$FAILOVER_CHAIN"

    # Walk the chain twice (wrap-around), starting just after the current
    # provider; a second encounter with the current provider means every
    # candidate has been tried.
    local found_current=false
    local provider
    for provider in "${chain_providers[@]}" "${chain_providers[@]}"; do
        if [ "$provider" = "$current" ]; then
            if [ "$found_current" = "true" ]; then
                # We've wrapped around, all exhausted
                break
            fi
            found_current=true
            continue
        fi

        # Skip everything before the current provider's position.
        [ "$found_current" != "true" ] && continue

        # Check if this provider is healthy
        if check_provider_health "$provider"; then
            log_info "Failover: switching from $current to $provider"

            # Load the new provider config
            local provider_dir
            provider_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)/providers"
            if [ -f "$provider_dir/$provider.sh" ]; then
                source "$provider_dir/$provider.sh"
            fi

            # Persist the switch to failover.json
            update_failover_state "currentProvider" "$provider"
            update_failover_state "lastFailover" "$(date -u +%Y-%m-%dT%H:%M:%SZ)"
            update_failover_state "failoverCount" "$((FAILOVER_COUNT + 1))"
            update_failover_health "$provider" "healthy"

            # Update runtime provider vars
            PROVIDER_NAME="$provider"

            emit_event_json "provider_failover" \
                "from=$current" \
                "to=$provider" \
                "reason=rate_limit" \
                "iteration=$ITERATION_COUNT" 2>/dev/null || true

            log_info "Failover: now using $provider (failover #$((FAILOVER_COUNT + 1)))"
            return 0
        else
            log_debug "Failover: $provider is unhealthy, skipping"
            update_failover_health "$provider" "unhealthy"
        fi
    done

    # NOTE(review): if the current provider is not in the chain at all, no
    # candidate is ever tried and we fall straight through here — confirm
    # that is the intended behavior.
    log_warn "Failover: all providers in chain exhausted, falling back to retry"
    return 1
}
|
|
6753
|
+
|
|
6754
|
+
# After a successful iteration on a fallback provider, see whether the
# primary provider is healthy again and, if so, switch back to it.
# Returns: 0 when we switched back to the primary, 1 otherwise.
check_primary_recovery() {
    read_failover_config || return 1
    [ "$FAILOVER_ENABLED" = "true" ] || return 1

    local active="${FAILOVER_CURRENT:-${PROVIDER_NAME:-claude}}"
    local primary="${FAILOVER_PRIMARY:-claude}"

    # Nothing to do while we are already running on the primary.
    [ "$active" = "$primary" ] && return 1

    # Primary still unhealthy? Stay on the fallback.
    check_provider_health "$primary" || return 1

    log_info "Failover: primary provider $primary appears healthy, switching back"

    # Re-source the primary provider's configuration.
    local providers_root
    providers_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)/providers"
    if [ -f "$providers_root/$primary.sh" ]; then
        source "$providers_root/$primary.sh"
    fi

    update_failover_state "currentProvider" "$primary"
    update_failover_health "$primary" "healthy"

    PROVIDER_NAME="$primary"

    emit_event_json "provider_recovery" \
        "from=$active" \
        "to=$primary" \
        "iteration=$ITERATION_COUNT" 2>/dev/null || true

    log_info "Failover: recovered to primary provider $primary"
    return 0
}
|
|
6799
|
+
|
|
6528
6800
|
#===============================================================================
|
|
6529
6801
|
# Rate Limit Detection
|
|
6530
6802
|
#===============================================================================
|
|
@@ -8145,6 +8417,9 @@ run_autonomous() {
|
|
|
8145
8417
|
load_state
|
|
8146
8418
|
local retry=$RETRY_COUNT
|
|
8147
8419
|
|
|
8420
|
+
# Initialize Cross-Provider Failover (v6.19.0)
|
|
8421
|
+
init_failover_state
|
|
8422
|
+
|
|
8148
8423
|
# Initialize Completion Council (v5.25.0)
|
|
8149
8424
|
if type council_init &>/dev/null; then
|
|
8150
8425
|
council_init "$prd_path"
|
|
@@ -8784,6 +9059,9 @@ if __name__ == "__main__":
|
|
|
8784
9059
|
log_warn "Council will evaluate at next check interval (every ${COUNCIL_CHECK_INTERVAL:-5} iterations)"
|
|
8785
9060
|
fi
|
|
8786
9061
|
|
|
9062
|
+
# Cross-provider failover: check if primary has recovered (v6.19.0)
|
|
9063
|
+
check_primary_recovery 2>/dev/null || true
|
|
9064
|
+
|
|
8787
9065
|
# SUCCESS exit - continue IMMEDIATELY to next iteration (no wait!)
|
|
8788
9066
|
log_step "Starting next iteration..."
|
|
8789
9067
|
((retry++))
|
|
@@ -8801,6 +9079,13 @@ if __name__ == "__main__":
|
|
|
8801
9079
|
local wait_time
|
|
8802
9080
|
|
|
8803
9081
|
if [ $rate_limit_wait -gt 0 ]; then
|
|
9082
|
+
# Cross-provider failover (v6.19.0): try switching provider before waiting
|
|
9083
|
+
if attempt_provider_failover 2>/dev/null; then
|
|
9084
|
+
log_info "Failover succeeded - retrying immediately with ${PROVIDER_NAME}"
|
|
9085
|
+
((retry++))
|
|
9086
|
+
continue
|
|
9087
|
+
fi
|
|
9088
|
+
|
|
8804
9089
|
wait_time=$rate_limit_wait
|
|
8805
9090
|
local human_time=$(format_duration $wait_time)
|
|
8806
9091
|
log_warn "Rate limit detected! Waiting until reset (~$human_time)..."
|
package/dashboard/__init__.py
CHANGED
package/docs/INSTALLATION.md
CHANGED
package/mcp/__init__.py
CHANGED