loki-mode 4.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +691 -0
- package/SKILL.md +191 -0
- package/VERSION +1 -0
- package/autonomy/.loki/dashboard/index.html +2634 -0
- package/autonomy/CONSTITUTION.md +508 -0
- package/autonomy/README.md +201 -0
- package/autonomy/config.example.yaml +152 -0
- package/autonomy/loki +526 -0
- package/autonomy/run.sh +3636 -0
- package/bin/loki-mode.js +26 -0
- package/bin/postinstall.js +60 -0
- package/docs/ACKNOWLEDGEMENTS.md +234 -0
- package/docs/COMPARISON.md +325 -0
- package/docs/COMPETITIVE-ANALYSIS.md +333 -0
- package/docs/INSTALLATION.md +547 -0
- package/docs/auto-claude-comparison.md +276 -0
- package/docs/cursor-comparison.md +225 -0
- package/docs/dashboard-guide.md +355 -0
- package/docs/screenshots/README.md +149 -0
- package/docs/screenshots/dashboard-agents.png +0 -0
- package/docs/screenshots/dashboard-tasks.png +0 -0
- package/docs/thick2thin.md +173 -0
- package/package.json +48 -0
- package/references/advanced-patterns.md +453 -0
- package/references/agent-types.md +243 -0
- package/references/agents.md +1043 -0
- package/references/business-ops.md +550 -0
- package/references/competitive-analysis.md +216 -0
- package/references/confidence-routing.md +371 -0
- package/references/core-workflow.md +275 -0
- package/references/cursor-learnings.md +207 -0
- package/references/deployment.md +604 -0
- package/references/lab-research-patterns.md +534 -0
- package/references/mcp-integration.md +186 -0
- package/references/memory-system.md +467 -0
- package/references/openai-patterns.md +647 -0
- package/references/production-patterns.md +568 -0
- package/references/prompt-repetition.md +192 -0
- package/references/quality-control.md +437 -0
- package/references/sdlc-phases.md +410 -0
- package/references/task-queue.md +361 -0
- package/references/tool-orchestration.md +691 -0
- package/skills/00-index.md +120 -0
- package/skills/agents.md +249 -0
- package/skills/artifacts.md +174 -0
- package/skills/github-integration.md +218 -0
- package/skills/model-selection.md +125 -0
- package/skills/parallel-workflows.md +526 -0
- package/skills/patterns-advanced.md +188 -0
- package/skills/production.md +292 -0
- package/skills/quality-gates.md +180 -0
- package/skills/testing.md +149 -0
- package/skills/troubleshooting.md +109 -0
package/autonomy/run.sh
ADDED
|
@@ -0,0 +1,3636 @@
|
|
|
1
|
+
#!/bin/bash
|
|
2
|
+
#===============================================================================
|
|
3
|
+
# Loki Mode - Autonomous Runner
|
|
4
|
+
# Single script that handles prerequisites, setup, and autonomous execution
|
|
5
|
+
#
|
|
6
|
+
# Usage:
|
|
7
|
+
# ./autonomy/run.sh [OPTIONS] [PRD_PATH]
|
|
8
|
+
# ./autonomy/run.sh ./docs/requirements.md
|
|
9
|
+
# ./autonomy/run.sh # Interactive mode
|
|
10
|
+
# ./autonomy/run.sh --parallel # Parallel mode with git worktrees
|
|
11
|
+
# ./autonomy/run.sh --parallel ./prd.md # Parallel mode with PRD
|
|
12
|
+
#
|
|
13
|
+
# Environment Variables:
|
|
14
|
+
# LOKI_MAX_RETRIES - Max retry attempts (default: 50)
|
|
15
|
+
# LOKI_BASE_WAIT - Base wait time in seconds (default: 60)
|
|
16
|
+
# LOKI_MAX_WAIT - Max wait time in seconds (default: 3600)
|
|
17
|
+
# LOKI_SKIP_PREREQS - Skip prerequisite checks (default: false)
|
|
18
|
+
# LOKI_DASHBOARD - Enable web dashboard (default: true)
|
|
19
|
+
# LOKI_DASHBOARD_PORT - Dashboard port (default: 57374)
|
|
20
|
+
#
|
|
21
|
+
# Resource Monitoring (prevents system overload):
|
|
22
|
+
# LOKI_RESOURCE_CHECK_INTERVAL - Check resources every N seconds (default: 300 = 5min)
|
|
23
|
+
# LOKI_RESOURCE_CPU_THRESHOLD - CPU % threshold to warn (default: 80)
|
|
24
|
+
# LOKI_RESOURCE_MEM_THRESHOLD - Memory % threshold to warn (default: 80)
|
|
25
|
+
#
|
|
26
|
+
# Security & Autonomy Controls (Enterprise):
|
|
27
|
+
# LOKI_STAGED_AUTONOMY - Require approval before execution (default: false)
|
|
28
|
+
# LOKI_AUDIT_LOG - Enable audit logging (default: false)
|
|
29
|
+
# LOKI_MAX_PARALLEL_AGENTS - Limit concurrent agent spawning (default: 10)
|
|
30
|
+
# LOKI_SANDBOX_MODE - Run in sandboxed container (default: false, requires Docker)
|
|
31
|
+
# LOKI_ALLOWED_PATHS - Comma-separated paths agents can modify (default: all)
|
|
32
|
+
# LOKI_BLOCKED_COMMANDS - Comma-separated blocked shell commands (default: rm -rf /)
|
|
33
|
+
#
|
|
34
|
+
# SDLC Phase Controls (all enabled by default, set to 'false' to skip):
|
|
35
|
+
# LOKI_PHASE_UNIT_TESTS - Run unit tests (default: true)
|
|
36
|
+
# LOKI_PHASE_API_TESTS - Functional API testing (default: true)
|
|
37
|
+
# LOKI_PHASE_E2E_TESTS - E2E/UI testing with Playwright (default: true)
|
|
38
|
+
# LOKI_PHASE_SECURITY - Security scanning OWASP/auth (default: true)
|
|
39
|
+
# LOKI_PHASE_INTEGRATION - Integration tests SAML/OIDC/SSO (default: true)
|
|
40
|
+
# LOKI_PHASE_CODE_REVIEW - 3-reviewer parallel code review (default: true)
|
|
41
|
+
# LOKI_PHASE_WEB_RESEARCH - Competitor/feature gap research (default: true)
|
|
42
|
+
# LOKI_PHASE_PERFORMANCE - Load/performance testing (default: true)
|
|
43
|
+
# LOKI_PHASE_ACCESSIBILITY - WCAG compliance testing (default: true)
|
|
44
|
+
# LOKI_PHASE_REGRESSION - Regression testing (default: true)
|
|
45
|
+
# LOKI_PHASE_UAT - UAT simulation (default: true)
|
|
46
|
+
#
|
|
47
|
+
# Autonomous Loop Controls (Ralph Wiggum Mode):
|
|
48
|
+
# LOKI_COMPLETION_PROMISE - EXPLICIT stop condition text (default: none - runs forever)
|
|
49
|
+
# Example: "ALL TESTS PASSING 100%"
|
|
50
|
+
# Only stops when Claude outputs this EXACT text
|
|
51
|
+
# LOKI_MAX_ITERATIONS - Max loop iterations before exit (default: 1000)
|
|
52
|
+
# LOKI_PERPETUAL_MODE - Ignore ALL completion signals (default: false)
|
|
53
|
+
# Set to 'true' for truly infinite operation
|
|
54
|
+
#
|
|
55
|
+
# 2026 Research Enhancements:
|
|
56
|
+
# LOKI_PROMPT_REPETITION - Enable prompt repetition for Haiku agents (default: true)
|
|
57
|
+
# arXiv 2512.14982v1: Improves accuracy 4-5x on structured tasks
|
|
58
|
+
# LOKI_CONFIDENCE_ROUTING - Enable confidence-based routing (default: true)
|
|
59
|
+
# HN Production: 4-tier routing (auto-approve, direct, supervisor, escalate)
|
|
60
|
+
# LOKI_AUTONOMY_MODE - Autonomy level (default: perpetual)
|
|
61
|
+
# Options: perpetual, checkpoint, supervised
|
|
62
|
+
# Tim Dettmers: "Shorter bursts of autonomy with feedback loops"
|
|
63
|
+
#
|
|
64
|
+
# Parallel Workflows (Git Worktrees):
|
|
65
|
+
# LOKI_PARALLEL_MODE - Enable git worktree-based parallelism (default: false)
|
|
66
|
+
# Use --parallel flag or set to 'true'
|
|
67
|
+
# LOKI_MAX_WORKTREES - Maximum parallel worktrees (default: 5)
|
|
68
|
+
# LOKI_MAX_PARALLEL_SESSIONS - Maximum concurrent Claude sessions (default: 3)
|
|
69
|
+
# LOKI_PARALLEL_TESTING - Run testing stream in parallel (default: true)
|
|
70
|
+
# LOKI_PARALLEL_DOCS - Run documentation stream in parallel (default: true)
|
|
71
|
+
# LOKI_PARALLEL_BLOG - Run blog stream if site has blog (default: false)
|
|
72
|
+
# LOKI_AUTO_MERGE - Auto-merge completed features (default: true)
|
|
73
|
+
#
|
|
74
|
+
# Complexity Tiers (Auto-Claude pattern):
|
|
75
|
+
# LOKI_COMPLEXITY - Force complexity tier (default: auto)
|
|
76
|
+
# Options: auto, simple, standard, complex
|
|
77
|
+
# Simple (3 phases): 1-2 files, single service, UI fixes, text changes
|
|
78
|
+
# Standard (6 phases): 3-10 files, 1-2 services, features, bug fixes
|
|
79
|
+
# Complex (8 phases): 10+ files, multiple services, external integrations
|
|
80
|
+
#
|
|
81
|
+
# GitHub Integration (v4.1.0):
|
|
82
|
+
# LOKI_GITHUB_IMPORT - Import open issues as tasks (default: false)
|
|
83
|
+
# LOKI_GITHUB_PR - Create PR when feature complete (default: false)
|
|
84
|
+
# LOKI_GITHUB_SYNC - Sync status back to issues (default: false)
|
|
85
|
+
# LOKI_GITHUB_REPO - Override repo detection (default: from git remote)
|
|
86
|
+
# LOKI_GITHUB_LABELS - Filter by labels (comma-separated)
|
|
87
|
+
# LOKI_GITHUB_MILESTONE - Filter by milestone
|
|
88
|
+
# LOKI_GITHUB_ASSIGNEE - Filter by assignee
|
|
89
|
+
# LOKI_GITHUB_LIMIT - Max issues to import (default: 100)
|
|
90
|
+
# LOKI_GITHUB_PR_LABEL - Label for PRs (default: none, avoids error if label missing)
|
|
91
|
+
#
|
|
92
|
+
# Desktop Notifications (v4.1.0):
|
|
93
|
+
# LOKI_NOTIFICATIONS - Enable desktop notifications (default: true)
|
|
94
|
+
# LOKI_NOTIFICATION_SOUND - Play sound with notifications (default: true)
|
|
95
|
+
#
|
|
96
|
+
# Human Intervention (Auto-Claude pattern):
|
|
97
|
+
# PAUSE file: touch .loki/PAUSE - pauses after current session
|
|
98
|
+
# HUMAN_INPUT.md: echo "instructions" > .loki/HUMAN_INPUT.md
|
|
99
|
+
# STOP file: touch .loki/STOP - stops immediately
|
|
100
|
+
# Ctrl+C (once): Pauses execution, shows options
|
|
101
|
+
# Ctrl+C (twice): Exits immediately
|
|
102
|
+
#===============================================================================
|
|
103
|
+
|
|
104
|
+
set -uo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"

#===============================================================================
# Self-Copy Protection
# Bash reads scripts incrementally, so editing a running script corrupts execution.
# Solution: Copy ourselves to a temp file and run from there. The original can
# be safely edited while the copy keeps executing.
#===============================================================================
if [[ -z "${LOKI_RUNNING_FROM_TEMP:-}" ]]; then
    # SECURITY FIX: use mktemp instead of the predictable /tmp/loki-run-$$.sh —
    # a guessable name in a world-writable directory is open to symlink attacks.
    TEMP_SCRIPT="$(mktemp "${TMPDIR:-/tmp}/loki-run-XXXXXX.sh")" || exit 1
    cp "${BASH_SOURCE[0]}" "$TEMP_SCRIPT"
    chmod +x "$TEMP_SCRIPT"
    export LOKI_RUNNING_FROM_TEMP=1
    export LOKI_ORIGINAL_SCRIPT_DIR="$SCRIPT_DIR"
    export LOKI_ORIGINAL_PROJECT_DIR="$PROJECT_DIR"
    exec "$TEMP_SCRIPT" "$@"
fi

# Restore original paths when running from temp
SCRIPT_DIR="${LOKI_ORIGINAL_SCRIPT_DIR:-$SCRIPT_DIR}"
PROJECT_DIR="${LOKI_ORIGINAL_PROJECT_DIR:-$PROJECT_DIR}"

# Clean up temp script on exit (BASH_SOURCE[0] is the temp copy at this point)
trap 'rm -f "${BASH_SOURCE[0]}" 2>/dev/null' EXIT
|
|
130
|
+
|
|
131
|
+
#===============================================================================
|
|
132
|
+
# Configuration File Support (v4.1.0)
|
|
133
|
+
# Loads settings from config file, environment variables take precedence
|
|
134
|
+
#===============================================================================
|
|
135
|
+
load_config_file() {
    # Locate a Loki config file and load its settings. Search order:
    # project-local (.loki/) first, then user-global (~/.config/loki-mode/).
    # Project-local files must not be symlinks (path-traversal protection);
    # files under $HOME are trusted because the user controls that directory.
    # Environment variables that are already set always take precedence.
    local cfg=""
    local candidate

    for candidate in ".loki/config.yaml" ".loki/config.yml"; do
        if [ -f "$candidate" ] && [ ! -L "$candidate" ]; then
            cfg="$candidate"
            break
        fi
    done

    if [ -z "$cfg" ]; then
        for candidate in "${HOME}/.config/loki-mode/config.yaml" "${HOME}/.config/loki-mode/config.yml"; do
            if [ -f "$candidate" ]; then
                cfg="$candidate"
                break
            fi
        done
    fi

    # No config file anywhere — that is fine, defaults apply.
    if [ -z "$cfg" ]; then
        return 0
    fi

    if ! command -v yq &> /dev/null; then
        # No yq available: fall back to the grep/sed parser for simple YAML.
        parse_simple_yaml "$cfg"
        return 0
    fi

    # yq present: use the real YAML parser.
    parse_yaml_with_yq "$cfg"
}
|
|
167
|
+
|
|
168
|
+
# Fallback YAML parser for simple key: value format (used when yq is missing)
parse_simple_yaml() {
    # Walks a fixed table of "yaml.path ENV_VAR" pairs and lets set_from_yaml
    # extract each value from the file. set_from_yaml never overwrites a
    # variable that is already set, so the environment keeps precedence.
    local file="$1"
    local pair yaml_path env_var

    local -a mappings=(
        "core.max_retries LOKI_MAX_RETRIES"
        "core.base_wait LOKI_BASE_WAIT"
        "core.max_wait LOKI_MAX_WAIT"
        "core.skip_prereqs LOKI_SKIP_PREREQS"
        "dashboard.enabled LOKI_DASHBOARD"
        "dashboard.port LOKI_DASHBOARD_PORT"
        "resources.check_interval LOKI_RESOURCE_CHECK_INTERVAL"
        "resources.cpu_threshold LOKI_RESOURCE_CPU_THRESHOLD"
        "resources.mem_threshold LOKI_RESOURCE_MEM_THRESHOLD"
        "security.staged_autonomy LOKI_STAGED_AUTONOMY"
        "security.audit_log LOKI_AUDIT_LOG"
        "security.max_parallel_agents LOKI_MAX_PARALLEL_AGENTS"
        "security.sandbox_mode LOKI_SANDBOX_MODE"
        "security.allowed_paths LOKI_ALLOWED_PATHS"
        "security.blocked_commands LOKI_BLOCKED_COMMANDS"
        "phases.unit_tests LOKI_PHASE_UNIT_TESTS"
        "phases.api_tests LOKI_PHASE_API_TESTS"
        "phases.e2e_tests LOKI_PHASE_E2E_TESTS"
        "phases.security LOKI_PHASE_SECURITY"
        "phases.integration LOKI_PHASE_INTEGRATION"
        "phases.code_review LOKI_PHASE_CODE_REVIEW"
        "phases.web_research LOKI_PHASE_WEB_RESEARCH"
        "phases.performance LOKI_PHASE_PERFORMANCE"
        "phases.accessibility LOKI_PHASE_ACCESSIBILITY"
        "phases.regression LOKI_PHASE_REGRESSION"
        "phases.uat LOKI_PHASE_UAT"
        "completion.promise LOKI_COMPLETION_PROMISE"
        "completion.max_iterations LOKI_MAX_ITERATIONS"
        "completion.perpetual_mode LOKI_PERPETUAL_MODE"
        "model.prompt_repetition LOKI_PROMPT_REPETITION"
        "model.confidence_routing LOKI_CONFIDENCE_ROUTING"
        "model.autonomy_mode LOKI_AUTONOMY_MODE"
        "model.compaction_interval LOKI_COMPACTION_INTERVAL"
        "parallel.enabled LOKI_PARALLEL_MODE"
        "parallel.max_worktrees LOKI_MAX_WORKTREES"
        "parallel.max_sessions LOKI_MAX_PARALLEL_SESSIONS"
        "parallel.testing LOKI_PARALLEL_TESTING"
        "parallel.docs LOKI_PARALLEL_DOCS"
        "parallel.blog LOKI_PARALLEL_BLOG"
        "parallel.auto_merge LOKI_AUTO_MERGE"
        "complexity.tier LOKI_COMPLEXITY"
        "github.import LOKI_GITHUB_IMPORT"
        "github.pr LOKI_GITHUB_PR"
        "github.sync LOKI_GITHUB_SYNC"
        "github.repo LOKI_GITHUB_REPO"
        "github.labels LOKI_GITHUB_LABELS"
        "github.milestone LOKI_GITHUB_MILESTONE"
        "github.assignee LOKI_GITHUB_ASSIGNEE"
        "github.limit LOKI_GITHUB_LIMIT"
        "github.pr_label LOKI_GITHUB_PR_LABEL"
        "notifications.enabled LOKI_NOTIFICATIONS"
        "notifications.sound LOKI_NOTIFICATION_SOUND"
    )

    for pair in "${mappings[@]}"; do
        yaml_path="${pair%% *}"
        env_var="${pair##* }"
        set_from_yaml "$file" "$yaml_path" "$env_var"
    done
}
|
|
246
|
+
|
|
247
|
+
# Validate YAML value to prevent injection attacks
|
|
248
|
+
validate_yaml_value() {
|
|
249
|
+
local value="$1"
|
|
250
|
+
local max_length="${2:-1000}"
|
|
251
|
+
|
|
252
|
+
# Reject empty values
|
|
253
|
+
if [ -z "$value" ]; then
|
|
254
|
+
return 1
|
|
255
|
+
fi
|
|
256
|
+
|
|
257
|
+
# Reject values with dangerous shell metacharacters
|
|
258
|
+
# Allow alphanumeric, spaces, dots, dashes, underscores, slashes, colons, commas, @
|
|
259
|
+
if [[ "$value" =~ [\$\`\|\;\&\>\<\(\)\{\}\[\]\\] ]]; then
|
|
260
|
+
return 1
|
|
261
|
+
fi
|
|
262
|
+
|
|
263
|
+
# Reject values that are too long (DoS protection)
|
|
264
|
+
if [ "${#value}" -gt "$max_length" ]; then
|
|
265
|
+
return 1
|
|
266
|
+
fi
|
|
267
|
+
|
|
268
|
+
# Reject values with newlines (could corrupt variables)
|
|
269
|
+
if [[ "$value" == *$'\n'* ]]; then
|
|
270
|
+
return 1
|
|
271
|
+
fi
|
|
272
|
+
|
|
273
|
+
return 0
|
|
274
|
+
}
|
|
275
|
+
|
|
276
|
+
# Escape regex metacharacters for safe grep usage
|
|
277
|
+
escape_regex() {
|
|
278
|
+
local input="$1"
|
|
279
|
+
# Escape: . * ? + [ ] ^ $ { } | ( ) \
|
|
280
|
+
printf '%s' "$input" | sed 's/[.[\*?+^${}|()\\]/\\&/g'
|
|
281
|
+
}
|
|
282
|
+
|
|
283
|
+
# Helper: Extract value from YAML and set env var if not already set
set_from_yaml() {
    local file="$1"       # config file path
    local yaml_path="$2"  # dotted path, e.g. "core.max_retries"
    local env_var="$3"    # target environment variable name

    # Environment always wins: never overwrite a variable the user exported.
    if [ -n "${!env_var:-}" ]; then
        return 0
    fi

    # Extract value using grep and sed (handles simple YAML)
    # Convert yaml path like "core.max_retries" to search pattern
    local value=""
    local key="${yaml_path##*.}" # Get last part of path

    # Escape regex metacharacters in key for safe grep
    local escaped_key
    escaped_key=$(escape_regex "$key")

    # Simple grep for the key (works for flat or indented YAML)
    # Use read to avoid xargs command execution risks
    # Pipeline: first matching line -> strip the "key:" prefix -> strip any
    # trailing comment -> strip one layer of surrounding quotes -> drop
    # newlines -> trim leading/trailing whitespace.
    # NOTE(review): the \x27 hex escape (single quote) is a GNU sed extension;
    # BSD sed treats it as literal characters - confirm behavior on macOS.
    value=$(grep -E "^\s*${escaped_key}:" "$file" 2>/dev/null | head -1 | sed -E 's/.*:\s*//' | sed 's/#.*//' | sed 's/^["\x27]//;s/["\x27]$//' | tr -d '\n' | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')

    # Validate value before export (security check); "null" means the key was
    # absent, and validate_yaml_value rejects injection-prone strings.
    if [ -n "$value" ] && [ "$value" != "null" ] && validate_yaml_value "$value"; then
        export "$env_var=$value"
    fi
}
|
|
312
|
+
|
|
313
|
+
# Parse YAML using yq (proper parser)
parse_yaml_with_yq() {
    # For each "yaml.path:ENV_VAR" mapping below: read the value with yq and
    # export the env var, unless the variable is already set (environment
    # always takes precedence over config-file values).
    local file="$1"
    local mappings=(
        "core.max_retries:LOKI_MAX_RETRIES"
        "core.base_wait:LOKI_BASE_WAIT"
        "core.max_wait:LOKI_MAX_WAIT"
        "core.skip_prereqs:LOKI_SKIP_PREREQS"
        "dashboard.enabled:LOKI_DASHBOARD"
        "dashboard.port:LOKI_DASHBOARD_PORT"
        "resources.check_interval:LOKI_RESOURCE_CHECK_INTERVAL"
        "resources.cpu_threshold:LOKI_RESOURCE_CPU_THRESHOLD"
        "resources.mem_threshold:LOKI_RESOURCE_MEM_THRESHOLD"
        "security.staged_autonomy:LOKI_STAGED_AUTONOMY"
        "security.audit_log:LOKI_AUDIT_LOG"
        "security.max_parallel_agents:LOKI_MAX_PARALLEL_AGENTS"
        "security.sandbox_mode:LOKI_SANDBOX_MODE"
        "security.allowed_paths:LOKI_ALLOWED_PATHS"
        "security.blocked_commands:LOKI_BLOCKED_COMMANDS"
        "phases.unit_tests:LOKI_PHASE_UNIT_TESTS"
        "phases.api_tests:LOKI_PHASE_API_TESTS"
        "phases.e2e_tests:LOKI_PHASE_E2E_TESTS"
        "phases.security:LOKI_PHASE_SECURITY"
        "phases.integration:LOKI_PHASE_INTEGRATION"
        "phases.code_review:LOKI_PHASE_CODE_REVIEW"
        "phases.web_research:LOKI_PHASE_WEB_RESEARCH"
        "phases.performance:LOKI_PHASE_PERFORMANCE"
        "phases.accessibility:LOKI_PHASE_ACCESSIBILITY"
        "phases.regression:LOKI_PHASE_REGRESSION"
        "phases.uat:LOKI_PHASE_UAT"
        "completion.promise:LOKI_COMPLETION_PROMISE"
        "completion.max_iterations:LOKI_MAX_ITERATIONS"
        "completion.perpetual_mode:LOKI_PERPETUAL_MODE"
        "model.prompt_repetition:LOKI_PROMPT_REPETITION"
        "model.confidence_routing:LOKI_CONFIDENCE_ROUTING"
        "model.autonomy_mode:LOKI_AUTONOMY_MODE"
        "model.compaction_interval:LOKI_COMPACTION_INTERVAL"
        "parallel.enabled:LOKI_PARALLEL_MODE"
        "parallel.max_worktrees:LOKI_MAX_WORKTREES"
        "parallel.max_sessions:LOKI_MAX_PARALLEL_SESSIONS"
        "parallel.testing:LOKI_PARALLEL_TESTING"
        "parallel.docs:LOKI_PARALLEL_DOCS"
        "parallel.blog:LOKI_PARALLEL_BLOG"
        "parallel.auto_merge:LOKI_AUTO_MERGE"
        "complexity.tier:LOKI_COMPLEXITY"
        "github.import:LOKI_GITHUB_IMPORT"
        "github.pr:LOKI_GITHUB_PR"
        "github.sync:LOKI_GITHUB_SYNC"
        "github.repo:LOKI_GITHUB_REPO"
        "github.labels:LOKI_GITHUB_LABELS"
        "github.milestone:LOKI_GITHUB_MILESTONE"
        "github.assignee:LOKI_GITHUB_ASSIGNEE"
        "github.limit:LOKI_GITHUB_LIMIT"
        "github.pr_label:LOKI_GITHUB_PR_LABEL"
        "notifications.enabled:LOKI_NOTIFICATIONS"
        "notifications.sound:LOKI_NOTIFICATION_SOUND"
    )

    for mapping in "${mappings[@]}"; do
        local yaml_path="${mapping%%:*}"
        local env_var="${mapping##*:}"

        # Skip if env var is already set
        if [ -n "${!env_var:-}" ]; then
            continue
        fi

        # Extract value using yq; missing keys fall back to "" via the
        # yq "//" default operator, errors are silenced.
        local value
        value=$(yq eval ".$yaml_path // \"\"" "$file" 2>/dev/null)

        # Set env var if value found and not empty/null
        # Also validate for security (prevent injection)
        if [ -n "$value" ] && [ "$value" != "null" ] && [ "$value" != "" ] && validate_yaml_value "$value"; then
            export "$env_var=$value"
        fi
    done
}
|
|
391
|
+
|
|
392
|
+
# Load config file before setting defaults (env vars override file values,
# and these :- fallbacks only apply when neither is set)
load_config_file

# Configuration — retry/backoff knobs for the autonomous loop
MAX_RETRIES=${LOKI_MAX_RETRIES:-50}
BASE_WAIT=${LOKI_BASE_WAIT:-60}
MAX_WAIT=${LOKI_MAX_WAIT:-3600}
SKIP_PREREQS=${LOKI_SKIP_PREREQS:-false}
ENABLE_DASHBOARD=${LOKI_DASHBOARD:-true}
DASHBOARD_PORT=${LOKI_DASHBOARD_PORT:-57374}
RESOURCE_CHECK_INTERVAL=${LOKI_RESOURCE_CHECK_INTERVAL:-300} # Check every 5 minutes
RESOURCE_CPU_THRESHOLD=${LOKI_RESOURCE_CPU_THRESHOLD:-80} # CPU % threshold
RESOURCE_MEM_THRESHOLD=${LOKI_RESOURCE_MEM_THRESHOLD:-80} # Memory % threshold

# Security & Autonomy Controls
STAGED_AUTONOMY=${LOKI_STAGED_AUTONOMY:-false} # Require plan approval
AUDIT_LOG_ENABLED=${LOKI_AUDIT_LOG:-false} # Enable audit logging
MAX_PARALLEL_AGENTS=${LOKI_MAX_PARALLEL_AGENTS:-10} # Limit concurrent agents
SANDBOX_MODE=${LOKI_SANDBOX_MODE:-false} # Docker sandbox mode
ALLOWED_PATHS=${LOKI_ALLOWED_PATHS:-""} # Empty = all paths allowed
# Comma-separated substrings of commands to block (default includes rm -rf /,
# raw disk writes, mkfs, and the classic fork bomb)
BLOCKED_COMMANDS=${LOKI_BLOCKED_COMMANDS:-"rm -rf /,dd if=,mkfs,:(){ :|:& };:"}

# PIDs of background helper processes; assigned when each helper is started,
# used later for cleanup
STATUS_MONITOR_PID=""
DASHBOARD_PID=""
RESOURCE_MONITOR_PID=""

# SDLC Phase Controls (all enabled by default)
PHASE_UNIT_TESTS=${LOKI_PHASE_UNIT_TESTS:-true}
PHASE_API_TESTS=${LOKI_PHASE_API_TESTS:-true}
PHASE_E2E_TESTS=${LOKI_PHASE_E2E_TESTS:-true}
PHASE_SECURITY=${LOKI_PHASE_SECURITY:-true}
PHASE_INTEGRATION=${LOKI_PHASE_INTEGRATION:-true}
PHASE_CODE_REVIEW=${LOKI_PHASE_CODE_REVIEW:-true}
PHASE_WEB_RESEARCH=${LOKI_PHASE_WEB_RESEARCH:-true}
PHASE_PERFORMANCE=${LOKI_PHASE_PERFORMANCE:-true}
PHASE_ACCESSIBILITY=${LOKI_PHASE_ACCESSIBILITY:-true}
PHASE_REGRESSION=${LOKI_PHASE_REGRESSION:-true}
PHASE_UAT=${LOKI_PHASE_UAT:-true}

# Autonomous Loop Controls (Ralph Wiggum Mode)
# Default: No auto-completion - runs until max iterations or explicit promise
COMPLETION_PROMISE=${LOKI_COMPLETION_PROMISE:-""}
MAX_ITERATIONS=${LOKI_MAX_ITERATIONS:-1000}
ITERATION_COUNT=0
# Perpetual mode: never stop unless max iterations (ignores all completion signals)
PERPETUAL_MODE=${LOKI_PERPETUAL_MODE:-false}

# 2026 Research Enhancements (minimal additions)
PROMPT_REPETITION=${LOKI_PROMPT_REPETITION:-true}
CONFIDENCE_ROUTING=${LOKI_CONFIDENCE_ROUTING:-true}
AUTONOMY_MODE=${LOKI_AUTONOMY_MODE:-perpetual} # perpetual|checkpoint|supervised

# Proactive Context Management (OpenCode/Sisyphus pattern, validated by Opus)
COMPACTION_INTERVAL=${LOKI_COMPACTION_INTERVAL:-25} # Suggest compaction every N iterations

# Parallel Workflows (Git Worktrees)
PARALLEL_MODE=${LOKI_PARALLEL_MODE:-false}
MAX_WORKTREES=${LOKI_MAX_WORKTREES:-5}
MAX_PARALLEL_SESSIONS=${LOKI_MAX_PARALLEL_SESSIONS:-3}
PARALLEL_TESTING=${LOKI_PARALLEL_TESTING:-true}
PARALLEL_DOCS=${LOKI_PARALLEL_DOCS:-true}
PARALLEL_BLOG=${LOKI_PARALLEL_BLOG:-false}
AUTO_MERGE=${LOKI_AUTO_MERGE:-true}

# Complexity Tiers (Auto-Claude pattern)
# auto = detect from PRD/codebase, simple = 3 phases, standard = 6 phases, complex = 8 phases
COMPLEXITY_TIER=${LOKI_COMPLEXITY:-auto}
DETECTED_COMPLEXITY="" # Filled in by detect_complexity()

# Track worktree PIDs for cleanup (requires bash 4+ for associative arrays)
# Check bash version for parallel mode compatibility; the 2>/dev/null on the
# test hides the error if BASH_VERSION_MAJOR is non-numeric (condition is then false)
BASH_VERSION_MAJOR="${BASH_VERSION%%.*}"
if [ "$BASH_VERSION_MAJOR" -ge 4 ] 2>/dev/null; then
    declare -A WORKTREE_PIDS
    declare -A WORKTREE_PATHS
else
    # Fallback: parallel mode will check and warn (bash 3 lacks associative arrays)
    WORKTREE_PIDS=""
    WORKTREE_PATHS=""
fi

# Colors (ANSI escape sequences for log output)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
BOLD='\033[1m'
NC='\033[0m'
|
|
481
|
+
|
|
482
|
+
#===============================================================================
|
|
483
|
+
# Logging Functions
|
|
484
|
+
#===============================================================================
|
|
485
|
+
|
|
486
|
+
log_header() {
    # Print a boxed section banner for a major phase of the run.
    # $1 - header text (interpolated inside the box, with BOLD emphasis)
    echo ""
    echo -e "${BLUE}╔════════════════════════════════════════════════════════════════╗${NC}"
    echo -e "${BLUE}║${NC} ${BOLD}$1${NC}"
    echo -e "${BLUE}╚════════════════════════════════════════════════════════════════╝${NC}"
}

# Leveled log helpers: each prints a colored tag followed by all arguments.
# echo -e is required so the ANSI color variables are interpreted.
log_info() { echo -e "${GREEN}[INFO]${NC} $*"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $*"; }
log_warning() { log_warn "$@"; } # Alias for backwards compatibility
log_error() { echo -e "${RED}[ERROR]${NC} $*"; }
log_step() { echo -e "${CYAN}[STEP]${NC} $*"; }
|
|
498
|
+
|
|
499
|
+
#===============================================================================
|
|
500
|
+
# Complexity Tier Detection (Auto-Claude pattern)
|
|
501
|
+
#===============================================================================
|
|
502
|
+
|
|
503
|
+
# Detect project complexity from PRD and codebase
detect_complexity() {
    # Classifies the project as simple/standard/complex and stores the result
    # in the global DETECTED_COMPLEXITY. A forced tier (COMPLEXITY_TIER other
    # than "auto") short-circuits detection.
    # $1 - optional path to the PRD document
    local prd_path="${1:-}"
    local target_dir="${TARGET_DIR:-.}"

    # If forced, use that
    if [ "$COMPLEXITY_TIER" != "auto" ]; then
        DETECTED_COMPLEXITY="$COMPLEXITY_TIER"
        return 0
    fi

    # Count source files in the project (excluding common non-source dirs).
    # Declaration split from assignment so the find status is not masked.
    local file_count
    file_count=$(find "$target_dir" -type f \
        \( -name "*.ts" -o -name "*.tsx" -o -name "*.js" -o -name "*.jsx" \
        -o -name "*.py" -o -name "*.go" -o -name "*.rs" -o -name "*.java" \
        -o -name "*.rb" -o -name "*.php" -o -name "*.swift" -o -name "*.kt" \) \
        ! -path "*/node_modules/*" ! -path "*/.git/*" ! -path "*/vendor/*" \
        ! -path "*/dist/*" ! -path "*/build/*" ! -path "*/__pycache__/*" \
        2>/dev/null | wc -l | tr -d ' ')

    # Check for external integrations (auth providers, payments, cloud SDKs)
    local has_external=false
    if grep -rq "oauth\|SAML\|OIDC\|stripe\|twilio\|aws-sdk\|@google-cloud\|azure" \
        "$target_dir" --include="*.json" --include="*.ts" --include="*.js" 2>/dev/null; then
        has_external=true
    fi

    # Check for multiple services (docker-compose, k8s)
    local has_microservices=false
    if [ -f "$target_dir/docker-compose.yml" ] || [ -d "$target_dir/k8s" ] || \
        [ -f "$target_dir/docker-compose.yaml" ]; then
        has_microservices=true
    fi

    # Analyze PRD if provided: word count plus count of headings/checkboxes
    local prd_complexity="standard"
    if [ -n "$prd_path" ] && [ -f "$prd_path" ]; then
        local prd_words
        prd_words=$(wc -w < "$prd_path" | tr -d ' ')
        # BUGFIX: the previous `$(grep -c ... || echo "0")` produced TWO lines
        # when nothing matched — grep -c prints "0" AND exits non-zero, so the
        # `|| echo` appended a second "0", breaking the numeric comparisons
        # below. Capture first, then default only if empty.
        # Also use -E: the BRE alternation "\|" is a GNU grep extension and
        # fails on BSD grep (macOS).
        local feature_count
        feature_count=$(grep -cE "^##|^- \[" "$prd_path" 2>/dev/null)
        feature_count=${feature_count:-0}

        if [ "$prd_words" -lt 200 ] && [ "$feature_count" -lt 5 ]; then
            prd_complexity="simple"
        elif [ "$prd_words" -gt 1000 ] || [ "$feature_count" -gt 15 ]; then
            prd_complexity="complex"
        fi
    fi

    # Determine final complexity from the combined signals
    if [ "$file_count" -le 5 ] && [ "$prd_complexity" = "simple" ] && \
        [ "$has_external" = "false" ] && [ "$has_microservices" = "false" ]; then
        DETECTED_COMPLEXITY="simple"
    elif [ "$file_count" -gt 50 ] || [ "$has_microservices" = "true" ] || \
        [ "$has_external" = "true" ] || [ "$prd_complexity" = "complex" ]; then
        DETECTED_COMPLEXITY="complex"
    else
        DETECTED_COMPLEXITY="standard"
    fi

    log_info "Detected complexity: $DETECTED_COMPLEXITY (files: $file_count, external: $has_external, microservices: $has_microservices)"
}
|
|
563
|
+
|
|
564
|
+
# Get phases based on complexity tier
get_complexity_phases() {
    # Echo the number of SDLC phases for the detected complexity tier.
    # Unknown tiers fall back to the standard count.
    case "$DETECTED_COMPLEXITY" in
        simple)  echo "3" ;;
        complex) echo "8" ;;
        *)       echo "6" ;;  # standard and any unrecognized tier
    esac
}
|
|
581
|
+
|
|
582
|
+
# Get phase names based on complexity tier.
# Prints a space-separated phase list for DETECTED_COMPLEXITY on stdout;
# unrecognized tiers get the standard pipeline.
get_phase_names() {
  local names="RESEARCH DESIGN IMPLEMENT TEST REVIEW DEPLOY" # standard / fallback
  if [ "$DETECTED_COMPLEXITY" = "simple" ]; then
    names="IMPLEMENT TEST DEPLOY"
  elif [ "$DETECTED_COMPLEXITY" = "complex" ]; then
    names="RESEARCH ARCHITECTURE DESIGN IMPLEMENT TEST REVIEW SECURITY DEPLOY"
  fi
  echo "$names"
}
|
|
599
|
+
|
|
600
|
+
#===============================================================================
|
|
601
|
+
# GitHub Integration Functions (v4.1.0)
|
|
602
|
+
#===============================================================================
|
|
603
|
+
|
|
604
|
+
# GitHub integration settings.
# Every knob is driven by a LOKI_GITHUB_* environment variable with a safe
# default, so all GitHub features are opt-in.
GITHUB_IMPORT=${LOKI_GITHUB_IMPORT:-false}     # gate for import_github_issues
GITHUB_PR=${LOKI_GITHUB_PR:-false}             # gate for create_github_pr
GITHUB_SYNC=${LOKI_GITHUB_SYNC:-false}         # gate for sync_github_status
GITHUB_REPO=${LOKI_GITHUB_REPO:-""}            # owner/repo override; otherwise detected from the git remote
GITHUB_LABELS=${LOKI_GITHUB_LABELS:-""}        # comma-separated label filter applied when importing issues
GITHUB_MILESTONE=${LOKI_GITHUB_MILESTONE:-""}  # milestone filter for issue import
GITHUB_ASSIGNEE=${LOKI_GITHUB_ASSIGNEE:-""}    # assignee filter for issue import
GITHUB_LIMIT=${LOKI_GITHUB_LIMIT:-100}         # max issues fetched per import run
GITHUB_PR_LABEL=${LOKI_GITHUB_PR_LABEL:-""}    # optional label added to created PRs
|
|
614
|
+
|
|
615
|
+
# Check that the gh CLI is both installed and authenticated.
# Returns 0 when usable; otherwise logs a warning and returns 1.
check_github_cli() {
  command -v gh &> /dev/null || {
    log_warn "gh CLI not found. Install with: brew install gh"
    return 1
  }

  gh auth status &> /dev/null || {
    log_warn "gh CLI not authenticated. Run: gh auth login"
    return 1
  }

  return 0
}
|
|
629
|
+
|
|
630
|
+
# Resolve the GitHub "owner/repo" slug.
# Precedence: explicit $GITHUB_REPO, then the origin remote of the current
# git checkout. Prints the slug on stdout; returns 1 when undeterminable.
get_github_repo() {
  if [ -n "$GITHUB_REPO" ]; then
    echo "$GITHUB_REPO"
    return
  fi

  # Try to detect from git remote
  local remote_url
  remote_url=$(git remote get-url origin 2>/dev/null || echo "")

  if [ -z "$remote_url" ]; then
    return 1
  fi

  # Extract owner/repo from either URL form:
  #   https://github.com/owner/repo.git
  #   git@github.com:owner/repo.git
  # BUGFIX: the dot in "github.com" is now escaped; previously "." matched
  # any character, so unrelated hosts such as "githubXcom" also matched.
  local repo
  repo=$(echo "$remote_url" | sed -E 's/.*github\.com[:/]([^/]+\/[^/]+)(\.git)?$/\1/')
  repo="${repo%.git}" # strip a trailing .git the greedy match may have kept

  if [ -n "$repo" ] && [[ "$repo" == *"/"* ]]; then
    echo "$repo"
    return 0
  fi

  return 1
}
|
|
659
|
+
|
|
660
|
+
# Import open GitHub issues into the pending task queue.
# Gated by GITHUB_IMPORT=true. Applies GITHUB_LABELS / GITHUB_MILESTONE /
# GITHUB_ASSIGNEE filters and caps the fetch at GITHUB_LIMIT issues.
# Each new issue becomes task "github-<number>" in .loki/queue/pending.json;
# already-imported issues are skipped. Returns 1 on gh/auth/repo failure.
import_github_issues() {
  if [ "$GITHUB_IMPORT" != "true" ]; then
    return 0
  fi

  if ! check_github_cli; then
    return 1
  fi

  local repo
  repo=$(get_github_repo)
  if [ -z "$repo" ]; then
    log_error "Could not determine GitHub repo. Set LOKI_GITHUB_REPO=owner/repo"
    return 1
  fi

  log_info "Importing issues from GitHub: $repo"

  # Build gh issue list command with filters
  local gh_args=("issue" "list" "--repo" "$repo" "--state" "open" "--limit" "$GITHUB_LIMIT" "--json" "number,title,body,labels,url,milestone,assignees")

  if [ -n "$GITHUB_LABELS" ]; then
    IFS=',' read -ra LABELS <<< "$GITHUB_LABELS"
    for label in "${LABELS[@]}"; do
      # Trim whitespace from label
      label=$(echo "$label" | xargs)
      gh_args+=("--label" "$label")
    done
  fi

  if [ -n "$GITHUB_MILESTONE" ]; then
    gh_args+=("--milestone" "$GITHUB_MILESTONE")
  fi

  if [ -n "$GITHUB_ASSIGNEE" ]; then
    gh_args+=("--assignee" "$GITHUB_ASSIGNEE")
  fi

  # Fetch issues with error capture
  local issues gh_error
  if ! issues=$(gh "${gh_args[@]}" 2>&1); then
    gh_error="$issues"
    if echo "$gh_error" | grep -q "rate limit"; then
      log_error "GitHub API rate limit exceeded. Wait and retry."
    else
      log_error "Failed to fetch issues: $gh_error"
    fi
    return 1
  fi

  if [ -z "$issues" ] || [ "$issues" == "[]" ]; then
    log_info "No open issues found matching filters"
    return 0
  fi

  # Convert issues to tasks
  local pending_file=".loki/queue/pending.json"
  local task_count=0

  # Ensure pending.json exists
  if [ ! -f "$pending_file" ]; then
    echo '{"tasks":[]}' > "$pending_file"
  fi

  # Parse issues and add to pending queue.
  # Process substitution keeps the loop body in the current shell so
  # task_count survives past the loop.
  while read -r issue; do
    local number title body full_body url labels
    number=$(echo "$issue" | jq -r '.number')
    title=$(echo "$issue" | jq -r '.title')
    full_body=$(echo "$issue" | jq -r '.body // ""')
    # Truncate body with indicator if needed
    if [ ${#full_body} -gt 500 ]; then
      body="${full_body:0:497}..."
    else
      body="$full_body"
    fi
    url=$(echo "$issue" | jq -r '.url')
    labels=$(echo "$issue" | jq -c '[.labels[].name]')

    # Check if task already exists (pass the number as a jq variable rather
    # than splicing it into the program text)
    if jq -e --argjson n "$number" '.tasks[] | select(.github_issue == $n)' "$pending_file" &>/dev/null; then
      log_info "Issue #$number already imported, skipping"
      continue
    fi

    # Determine priority from labels
    local priority="normal"
    if echo "$labels" | grep -qE '"(priority:critical|P0)"'; then
      priority="critical"
    elif echo "$labels" | grep -qE '"(priority:high|P1)"'; then
      priority="high"
    elif echo "$labels" | grep -qE '"(priority:medium|P2)"'; then
      priority="medium"
    elif echo "$labels" | grep -qE '"(priority:low|P3)"'; then
      priority="low"
    fi

    # Add task to pending queue
    local task_id="github-$number"
    local task_json
    task_json=$(jq -n \
      --arg id "$task_id" \
      --arg title "$title" \
      --arg desc "GitHub Issue #$number: $body" \
      --argjson num "$number" \
      --arg url "$url" \
      --argjson labels "$labels" \
      --arg priority "$priority" \
      --arg created "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
      '{
        id: $id,
        title: $title,
        description: $desc,
        source: "github",
        github_issue: $num,
        github_url: $url,
        labels: $labels,
        priority: $priority,
        status: "pending",
        created_at: $created
      }')

    # Append atomically via a temp file so a failed jq run cannot truncate
    # the queue. BUGFIX: the previous version armed a RETURN trap inside this
    # loop; the trap was overwritten every iteration and only fired when the
    # function returned, so all but the last temp file leaked on failure.
    # Explicit cleanup in the failure branch covers every iteration.
    local temp_file
    temp_file=$(mktemp)
    if jq --argjson task "$task_json" '.tasks += [$task]' "$pending_file" > "$temp_file" && mv "$temp_file" "$pending_file"; then
      log_info "Imported issue #$number: $title"
      task_count=$((task_count + 1))
    else
      log_warn "Failed to import issue #$number"
      rm -f "$temp_file"
    fi
  done < <(echo "$issues" | jq -c '.[]')

  log_info "Imported $task_count issues from GitHub"
}
|
|
799
|
+
|
|
800
|
+
# Create a PR for a completed feature.
#   $1 - feature name (used in the PR title)
#   $2 - optional branch (defaults to the currently checked-out branch)
# Gated by GITHUB_PR=true. Builds the PR body from the task ledger and the
# quality-gate marker files, then opens the PR with `gh pr create`.
create_github_pr() {
  local feature_name="$1"
  # NOTE(review): branch_name is computed but never passed to `gh pr create`
  # (no --head argument) — gh infers the head branch from the checkout.
  # Confirm whether $2 was meant to be forwarded.
  local branch_name="${2:-$(git rev-parse --abbrev-ref HEAD)}"

  if [ "$GITHUB_PR" != "true" ]; then
    return 0
  fi

  if ! check_github_cli; then
    return 1
  fi

  local repo
  repo=$(get_github_repo)
  if [ -z "$repo" ]; then
    log_error "Could not determine GitHub repo"
    return 1
  fi

  log_info "Creating PR for: $feature_name"

  # Generate PR body from completed tasks
  local pr_body=".loki/reports/pr-body.md"
  mkdir -p "$(dirname "$pr_body")"

  cat > "$pr_body" << EOF
## Summary

Automated implementation by Loki Mode v4.1.0

### Feature: $feature_name

### Tasks Completed
EOF

  # Add completed tasks from ledger (best-effort: missing/invalid ledger is ignored)
  if [ -f ".loki/ledger.json" ]; then
    jq -r '.completed_tasks[]? | "- [x] \(.title // .id)"' .loki/ledger.json >> "$pr_body" 2>/dev/null || true
  fi

  # Quality-gate lines render PASS/PENDING from marker-file existence at
  # heredoc-expansion time.
  cat >> "$pr_body" << EOF

### Quality Gates
- Static Analysis: $([ -f ".loki/quality/static-analysis.pass" ] && echo "PASS" || echo "PENDING")
- Unit Tests: $([ -f ".loki/quality/unit-tests.pass" ] && echo "PASS" || echo "PENDING")
- Code Review: $([ -f ".loki/quality/code-review.pass" ] && echo "PASS" || echo "PENDING")

### Related Issues
EOF

  # Find related GitHub issues so the PR auto-closes them on merge
  if [ -f ".loki/ledger.json" ]; then
    jq -r '.completed_tasks[]? | select(.github_issue) | "Closes #\(.github_issue)"' .loki/ledger.json >> "$pr_body" 2>/dev/null || true
  fi

  # Build PR create command
  local pr_args=("pr" "create" "--repo" "$repo" "--title" "[Loki Mode] $feature_name" "--body-file" "$pr_body")

  # Add label only if specified (avoids error if label doesn't exist)
  if [ -n "$GITHUB_PR_LABEL" ]; then
    pr_args+=("--label" "$GITHUB_PR_LABEL")
  fi

  # Create PR and capture output (stderr folded in so failures are logged)
  local pr_url
  if ! pr_url=$(gh "${pr_args[@]}" 2>&1); then
    log_error "Failed to create PR: $pr_url"
    return 1
  fi

  log_info "PR created: $pr_url"
}
|
|
873
|
+
|
|
874
|
+
# Mirror a task status change onto its originating GitHub issue.
#   $1 - task id ("github-<issue number>" for GitHub-sourced tasks)
#   $2 - status: in_progress | completed | closed
#   $3 - optional extra message appended to the comment
# Gated by GITHUB_SYNC=true; silently returns 0 for non-GitHub tasks.
sync_github_status() {
  local task_id="$1"
  local status="$2"
  local message="${3:-}"

  if [ "$GITHUB_SYNC" != "true" ]; then
    return 0
  fi

  if ! check_github_cli; then
    return 1
  fi

  # Extract the issue number from task_id (format: github-123).
  # BUGFIX: use prefix removal instead of `sed 's/github-//'`, which deleted
  # the first "github-" anywhere in the id and could misread ids such as
  # "123github-" (or an all-numeric local id) as a GitHub issue number.
  local issue_number="${task_id#github-}"

  if [ "$issue_number" = "$task_id" ] || ! [[ "$issue_number" =~ ^[0-9]+$ ]]; then
    return 0 # Not a GitHub-sourced task
  fi

  local repo
  repo=$(get_github_repo)
  if [ -z "$repo" ]; then
    return 1
  fi

  case "$status" in
    "in_progress")
      gh issue comment "$issue_number" --repo "$repo" \
        --body "Loki Mode: Task in progress - ${message:-implementing solution...}" \
        2>/dev/null || true
      ;;
    "completed")
      gh issue comment "$issue_number" --repo "$repo" \
        --body "Loki Mode: Implementation complete. ${message:-}" \
        2>/dev/null || true
      ;;
    "closed")
      gh issue close "$issue_number" --repo "$repo" \
        --reason "completed" \
        --comment "Loki Mode: Fixed. ${message:-}" \
        2>/dev/null || true
      ;;
  esac
}
|
|
921
|
+
|
|
922
|
+
# Export locally-created tasks to GitHub as issues (reverse sync).
# Only tasks whose source is not "github" are exported; each gets the
# "loki-mode" label. Per-issue failures are logged and skipped.
export_tasks_to_github() {
  check_github_cli || return 1

  local repo
  repo=$(get_github_repo)
  if [ -z "$repo" ]; then
    log_error "Could not determine GitHub repo"
    return 1
  fi

  local pending_file=".loki/queue/pending.json"
  if [ ! -f "$pending_file" ]; then
    log_warn "No pending tasks to export"
    return 0
  fi

  local entry issue_title issue_body
  while read -r entry; do
    issue_title=$(echo "$entry" | jq -r '.title')
    issue_body=$(echo "$entry" | jq -r '.description // ""')

    log_info "Creating issue: $issue_title"
    gh issue create --repo "$repo" \
      --title "$issue_title" \
      --body "$issue_body" \
      --label "loki-mode" \
      2>/dev/null || log_warn "Failed to create issue: $issue_title"
  done < <(jq -c '.tasks[] | select(.source != "github")' "$pending_file" 2>/dev/null)
}
|
|
955
|
+
|
|
956
|
+
#===============================================================================
|
|
957
|
+
# Desktop Notifications (v4.1.0)
|
|
958
|
+
#===============================================================================
|
|
959
|
+
|
|
960
|
+
# Notification settings.
# Overridable via LOKI_NOTIFICATIONS / LOKI_NOTIFICATION_SOUND; both default on.
NOTIFICATIONS_ENABLED=${LOKI_NOTIFICATIONS:-true}    # master switch for send_notification
NOTIFICATION_SOUND=${LOKI_NOTIFICATION_SOUND:-true}  # gates the macOS beep in send_notification
|
|
963
|
+
|
|
964
|
+
# Send desktop notification (cross-platform).
#   $1 - title, $2 - message, $3 - urgency: low | normal | critical (default normal)
# macOS uses osascript (with optional beeps per urgency); Linux uses
# notify-send; the fallback is a terminal bell for critical urgency.
# Always returns 0 — notification failure must never break the caller.
send_notification() {
  local title="$1"
  local message="$2"
  local urgency="${3:-normal}" # low, normal, critical

  if [ "$NOTIFICATIONS_ENABLED" != "true" ]; then
    return 0
  fi

  # Validate inputs - skip empty notifications
  if [ -z "$title" ] && [ -z "$message" ]; then
    return 0
  fi
  title="${title:-Notification}" # Default title if empty

  # macOS: use osascript
  if command -v osascript &> /dev/null; then
    # Escape backslashes first, then double quotes for AppleScript
    local escaped_title="${title//\\/\\\\}"
    escaped_title="${escaped_title//\"/\\\"}"
    local escaped_message="${message//\\/\\\\}"
    escaped_message="${escaped_message//\"/\\\"}"

    osascript -e "display notification \"$escaped_message\" with title \"Loki Mode\" subtitle \"$escaped_title\"" 2>/dev/null || true

    # Play sound if enabled (low urgency intentionally silent)
    if [ "$NOTIFICATION_SOUND" = "true" ]; then
      case "$urgency" in
        critical)
          osascript -e 'beep 3' 2>/dev/null || true
          ;;
        normal)
          osascript -e 'beep' 2>/dev/null || true
          ;;
        low)
          # Intentionally no sound for low urgency notifications
          ;;
      esac
    fi
    return 0
  fi

  # Linux: use notify-send
  if command -v notify-send &> /dev/null; then
    local notify_urgency="normal"
    case "$urgency" in
      critical) notify_urgency="critical" ;;
      low) notify_urgency="low" ;;
      *) notify_urgency="normal" ;;
    esac

    # Escape markup characters for notify-send (supports basic Pango).
    # BUGFIX: these substitutions were no-ops (& -> &, < -> <, > -> >);
    # they now emit the proper entities so literal &, <, > are displayed
    # instead of being interpreted as markup.
    local safe_title="${title//&/&amp;}"
    safe_title="${safe_title//</&lt;}"
    safe_title="${safe_title//>/&gt;}"
    local safe_message="${message//&/&amp;}"
    safe_message="${safe_message//</&lt;}"
    safe_message="${safe_message//>/&gt;}"

    notify-send -u "$notify_urgency" "Loki Mode: $safe_title" "$safe_message" 2>/dev/null || true
    return 0
  fi

  # Fallback: terminal bell for critical notifications
  if [ "$urgency" = "critical" ]; then
    printf '\a' # Bell character
  fi

  return 0
}
|
|
1035
|
+
|
|
1036
|
+
# Convenience wrappers around send_notification for common lifecycle events.

# A task has begun; low urgency (silent on macOS).
notify_task_started() {
  local name="$1"
  send_notification "Task Started" "$name" "low"
}

# A task finished successfully.
notify_task_completed() {
  local name="$1"
  send_notification "Task Completed" "$name" "normal"
}

# A task failed; critical urgency.
notify_task_failed() {
  local name="$1"
  local reason="${2:-Unknown error}"
  send_notification "Task Failed" "$name: $reason" "critical"
}

# A workflow phase completed.
notify_phase_complete() {
  local phase="$1"
  send_notification "Phase Complete" "$phase" "normal"
}

# Every queued task is done.
notify_all_complete() {
  send_notification "All Tasks Complete" "Loki Mode has finished all tasks" "normal"
}

# Human attention is required; critical urgency.
notify_intervention_needed() {
  local why="$1"
  send_notification "Intervention Needed" "$why" "critical"
}

# An API rate limit forced a pause.
notify_rate_limit() {
  local delay="$1"
  send_notification "Rate Limited" "Waiting ${delay}s before retry" "normal"
}
|
|
1071
|
+
|
|
1072
|
+
#===============================================================================
|
|
1073
|
+
# Parallel Workflow Functions (Git Worktrees)
|
|
1074
|
+
#===============================================================================
|
|
1075
|
+
|
|
1076
|
+
# Check if parallel mode is supported (bash 4+ required).
# The original numeric test (with stderr suppressed) is kept verbatim so a
# non-numeric BASH_VERSION_MAJOR still falls through to success.
check_parallel_support() {
  if [ "$BASH_VERSION_MAJOR" -lt 4 ] 2>/dev/null; then
    local -a why=(
      "Parallel mode requires bash 4.0 or higher"
      "Current bash version: $BASH_VERSION"
      "On macOS, install newer bash: brew install bash"
    )
    local line
    for line in "${why[@]}"; do
      log_error "$line"
    done
    return 1
  fi
  return 0
}
|
|
1086
|
+
|
|
1087
|
+
# Create a git worktree for a named stream.
#   $1 - stream name (worktree dir becomes ../<project>-<stream>)
#   $2 - optional branch: when given, a new branch is created (falling back
#        to checking out an existing one); otherwise the worktree tracks
#        main, falling back to HEAD.
# Records the path in WORKTREE_PATHS, copies .loki state into the worktree,
# and starts a background dependency install for node/python/rust projects.
create_worktree() {
  local stream_name="$1"
  local branch_name="${2:-}"
  local project_name=$(basename "$TARGET_DIR")
  local worktree_path="${TARGET_DIR}/../${project_name}-${stream_name}"

  # Idempotent: reuse an existing worktree rather than failing
  if [ -d "$worktree_path" ]; then
    log_info "Worktree already exists: $stream_name"
    WORKTREE_PATHS[$stream_name]="$worktree_path"
    return 0
  fi

  log_step "Creating worktree: $stream_name"

  if [ -n "$branch_name" ]; then
    # Create new branch; if -b fails (branch exists), check it out instead
    git -C "$TARGET_DIR" worktree add "$worktree_path" -b "$branch_name" 2>/dev/null || \
      git -C "$TARGET_DIR" worktree add "$worktree_path" "$branch_name" 2>/dev/null
  else
    # Track main branch, falling back to HEAD when there is no "main"
    git -C "$TARGET_DIR" worktree add "$worktree_path" main 2>/dev/null || \
      git -C "$TARGET_DIR" worktree add "$worktree_path" HEAD 2>/dev/null
  fi

  # $? is the status of whichever `git worktree add` ran last in the chain
  if [ $? -eq 0 ]; then
    WORKTREE_PATHS[$stream_name]="$worktree_path"

    # Copy .loki state to worktree
    if [ -d "$TARGET_DIR/.loki" ]; then
      cp -r "$TARGET_DIR/.loki" "$worktree_path/" 2>/dev/null || true
    fi

    # Initialize environment in the background (detect project type and run
    # the appropriate install); failures are deliberately ignored
    (
      cd "$worktree_path"
      if [ -f "package.json" ]; then
        npm install --silent 2>/dev/null || true
      elif [ -f "requirements.txt" ]; then
        pip install -r requirements.txt -q 2>/dev/null || true
      elif [ -f "Cargo.toml" ]; then
        cargo build --quiet 2>/dev/null || true
      fi
    ) &

    log_info "Created worktree: $worktree_path"
    return 0
  else
    log_error "Failed to create worktree: $stream_name"
    return 1
  fi
}
|
|
1139
|
+
|
|
1140
|
+
# Remove a stream's worktree, stopping its Claude session first.
#   $1 - stream name. No-op (returns 0) when no worktree is tracked.
remove_worktree() {
  local stream_name="$1"
  local worktree_path="${WORKTREE_PATHS[$stream_name]:-}"

  if [ -z "$worktree_path" ] || [ ! -d "$worktree_path" ]; then
    return 0
  fi

  log_step "Removing worktree: $stream_name"

  # Kill any running Claude session and reap it
  local pid="${WORKTREE_PIDS[$stream_name]:-}"
  if [ -n "$pid" ] && kill -0 "$pid" 2>/dev/null; then
    kill "$pid" 2>/dev/null || true
    wait "$pid" 2>/dev/null || true
  fi

  # Remove worktree; fall back to plain rm if git refuses
  git -C "$TARGET_DIR" worktree remove "$worktree_path" --force 2>/dev/null || \
    rm -rf "$worktree_path" 2>/dev/null

  # BUGFIX: quote the array subscripts (ShellCheck SC2184) — unquoted, the
  # bracket expression is subject to glob expansion against files in the CWD.
  unset "WORKTREE_PATHS[$stream_name]"
  unset "WORKTREE_PIDS[$stream_name]"

  log_info "Removed worktree: $stream_name"
}
|
|
1167
|
+
|
|
1168
|
+
# Spawn a detached Claude session inside a stream's worktree.
#   $1 - stream name (worktree must already exist via create_worktree)
#   $2 - task prompt handed to the Claude CLI
# Enforces MAX_PARALLEL_SESSIONS, logs output to .loki/logs/ inside the
# worktree, and records the background PID in WORKTREE_PIDS.
spawn_worktree_session() {
  local stream_name="$1"
  local task_prompt="$2"
  local worktree_path="${WORKTREE_PATHS[$stream_name]:-}"

  if [ -z "$worktree_path" ] || [ ! -d "$worktree_path" ]; then
    log_error "Worktree not found: $stream_name"
    return 1
  fi

  # Count sessions whose process is still alive
  local active_count=0
  for pid in "${WORKTREE_PIDS[@]}"; do
    if kill -0 "$pid" 2>/dev/null; then
      # NOTE(review): ((active_count++)) returns status 1 when the prior
      # value is 0, which would abort here under `set -e` — confirm the
      # script's shell options.
      ((active_count++))
    fi
  done

  if [ "$active_count" -ge "$MAX_PARALLEL_SESSIONS" ]; then
    log_warn "Max parallel sessions reached ($MAX_PARALLEL_SESSIONS). Waiting..."
    return 1
  fi

  local log_file="$worktree_path/.loki/logs/session-${stream_name}.log"
  mkdir -p "$(dirname "$log_file")"

  log_step "Spawning Claude session: $stream_name"

  # Run the CLI in a backgrounded subshell so the cd does not leak and the
  # orchestrator keeps running; all output goes to the session log.
  (
    cd "$worktree_path"
    claude --dangerously-skip-permissions \
      -p "Loki Mode: $task_prompt. Read .loki/CONTINUITY.md for context." \
      >> "$log_file" 2>&1
  ) &

  local pid=$!
  WORKTREE_PIDS[$stream_name]=$pid

  log_info "Session spawned: $stream_name (PID: $pid)"
  return 0
}
|
|
1210
|
+
|
|
1211
|
+
# List all active worktrees known to git, plus the sessions this
# orchestrator tracks with a running/stopped status for each.
list_worktrees() {
  log_header "Active Worktrees"

  git -C "$TARGET_DIR" worktree list 2>/dev/null

  echo ""
  log_info "Tracked sessions:"
  local sid job state
  for sid in "${!WORKTREE_PIDS[@]}"; do
    job="${WORKTREE_PIDS[$sid]}"
    state="stopped"
    kill -0 "$job" 2>/dev/null && state="running"
    echo " [$sid] PID: $job - $state"
  done
}
|
|
1228
|
+
|
|
1229
|
+
# Check for completed features ready to merge.
# Scans .loki/signals for MERGE_REQUESTED_<feature> marker files; when
# AUTO_MERGE=true each requested feature is merged immediately.
check_merge_queue() {
  local marker_dir="$TARGET_DIR/.loki/signals"

  [ -d "$marker_dir" ] || return 0

  local marker feature
  for marker in "$marker_dir"/MERGE_REQUESTED_*; do
    [ -f "$marker" ] || continue
    feature="${marker##*/}"               # basename of the signal file
    feature="${feature#MERGE_REQUESTED_}" # strip the signal prefix
    log_info "Merge requested: $feature"

    if [ "$AUTO_MERGE" = "true" ]; then
      merge_feature "$feature"
    fi
  done
}
|
|
1248
|
+
|
|
1249
|
+
# AI-powered conflict resolution (inspired by Auto-Claude).
#   $1 - feature name (for logging only)
# For each file the in-progress merge left in a conflicted state, asks the
# Claude CLI to merge both sides and stages the result with `git add`.
# Returns 1 as soon as any file cannot be resolved; 0 when none remain.
resolve_conflicts_with_ai() {
  local feature="$1"
  local conflict_files
  conflict_files=$(git diff --name-only --diff-filter=U 2>/dev/null)

  if [ -z "$conflict_files" ]; then
    return 0
  fi

  log_step "AI-powered conflict resolution for: $feature"

  # BUGFIX: iterate line-by-line instead of the previous unquoted
  # `for file in $conflict_files`, which word-split paths containing spaces.
  local file
  while IFS= read -r file; do
    [ -n "$file" ] || continue
    log_info "Resolving conflicts in: $file"

    # Grab the file including its conflict markers
    local conflict_content
    conflict_content=$(cat "$file")

    # Use Claude to resolve conflict
    local resolution
    resolution=$(claude --dangerously-skip-permissions -p "
You are resolving a git merge conflict. The file below contains conflict markers.
Your task is to merge both changes intelligently, preserving functionality from both sides.

FILE: $file
CONTENT:
$conflict_content

Output ONLY the resolved file content with no conflict markers. No explanations.
" --output-format text 2>/dev/null)

    if [ -n "$resolution" ]; then
      echo "$resolution" > "$file"
      git add "$file"
      log_info "Resolved: $file"
    else
      log_error "AI resolution failed for: $file"
      return 1
    fi
  done <<< "$conflict_files"

  return 0
}
|
|
1290
|
+
|
|
1291
|
+
# Merge a completed feature branch into main (with AI conflict resolution).
#   $1 - feature name; the branch merged is feature/<name>.
# Runs inside a subshell so the cd into TARGET_DIR does not leak. On success
# it removes the merge signal, the feature worktree and the branch, then
# raises a DOCS_NEEDED signal for the docs stream.
merge_feature() {
  local feature="$1"
  local branch="feature/$feature"

  log_step "Merging feature: $feature"

  (
    cd "$TARGET_DIR"

    # Ensure we're on main
    # NOTE(review): assumes the default branch is literally "main" — confirm
    # behavior for repos whose default branch is master/trunk.
    git checkout main 2>/dev/null

    # Attempt merge with no-ff for clear history
    if git merge "$branch" --no-ff -m "feat: Merge $feature" 2>/dev/null; then
      log_info "Merged cleanly: $feature"
    else
      # Merge has conflicts - try AI resolution
      log_warn "Merge conflicts detected - attempting AI resolution"

      if resolve_conflicts_with_ai "$feature"; then
        # AI resolved conflicts, commit the merge
        git commit -m "feat: Merge $feature (AI-resolved conflicts)"
        log_info "Merged with AI conflict resolution: $feature"
      else
        # AI resolution failed, abort merge
        log_error "AI conflict resolution failed: $feature"
        git merge --abort 2>/dev/null || true
        return 1 # exits only the subshell; becomes merge_feature's status
      fi
    fi

    # Remove signal
    rm -f ".loki/signals/MERGE_REQUESTED_$feature"

    # Remove worktree
    remove_worktree "feature-$feature"

    # Delete branch
    git branch -d "$branch" 2>/dev/null || true

    # Signal for docs update
    touch ".loki/signals/DOCS_NEEDED"
  )
}
|
|
1336
|
+
|
|
1337
|
+
# Initialize parallel workflow streams.
# Creates the worktrees enabled via PARALLEL_TESTING / PARALLEL_DOCS /
# PARALLEL_BLOG and prints a summary. Returns 1 when bash is too old.
init_parallel_streams() {
  # Check bash version
  if ! check_parallel_support; then
    return 1
  fi

  log_header "Initializing Parallel Workflows"

  local active_streams=0

  # BUGFIX (throughout): count with plain arithmetic assignment instead of
  # ((active_streams++)) — the post-increment form returns status 1 when the
  # prior value is 0, which aborts the script under `set -e`.

  # Create testing worktree (always tracks main)
  if [ "$PARALLEL_TESTING" = "true" ]; then
    create_worktree "testing"
    active_streams=$((active_streams + 1))
  fi

  # Create documentation worktree
  if [ "$PARALLEL_DOCS" = "true" ]; then
    create_worktree "docs"
    active_streams=$((active_streams + 1))
  fi

  # Create blog worktree if enabled
  if [ "$PARALLEL_BLOG" = "true" ]; then
    create_worktree "blog"
    active_streams=$((active_streams + 1))
  fi

  log_info "Initialized $active_streams parallel streams"
  list_worktrees
}
|
|
1369
|
+
|
|
1370
|
+
# Spawn a feature worktree plus a Claude session working on it.
#   $1 - feature name (branch becomes feature/<name>)
#   $2 - task description for the session prompt
# Returns 1 (queuing the feature) when MAX_WORKTREES is already reached.
spawn_feature_stream() {
  local feature_name="$1"
  local task_description="$2"

  # Respect the worktree cap before creating anything new
  local current_count
  current_count=$(git -C "$TARGET_DIR" worktree list 2>/dev/null | wc -l)
  if [ "$current_count" -ge "$MAX_WORKTREES" ]; then
    log_warn "Max worktrees reached ($MAX_WORKTREES). Queuing feature: $feature_name"
    return 1
  fi

  create_worktree "feature-$feature_name" "feature/$feature_name"
  spawn_worktree_session "feature-$feature_name" "$task_description"
}
|
|
1385
|
+
|
|
1386
|
+
# Stop every tracked Claude session on exit.
# Worktrees are deliberately left on disk for post-mortem inspection.
cleanup_parallel_streams() {
  log_header "Cleaning Up Parallel Streams"

  # Signal each live session to stop
  local sid job
  for sid in "${!WORKTREE_PIDS[@]}"; do
    job="${WORKTREE_PIDS[$sid]}"
    if kill -0 "$job" 2>/dev/null; then
      log_step "Stopping session: $sid"
      kill "$job" 2>/dev/null || true
    fi
  done

  # Wait for all to finish
  wait 2>/dev/null || true

  # Uncomment to auto-remove the worktrees as well:
  # for sid in "${!WORKTREE_PATHS[@]}"; do
  #   remove_worktree "$sid"
  # done

  log_info "Parallel streams stopped"
}
|
|
1410
|
+
|
|
1411
|
+
# Orchestrator loop for parallel mode.
# Boots the standing streams (testing/docs), then loops until INT/TERM:
# services the merge queue, prunes dead sessions, and snapshots stream
# state to .loki/state/parallel-streams.json every 30 seconds.
run_parallel_orchestrator() {
  log_header "Parallel Orchestrator Started"

  # Initialize streams
  init_parallel_streams

  # Spawn testing session
  if [ "$PARALLEL_TESTING" = "true" ] && [ -n "${WORKTREE_PATHS[testing]:-}" ]; then
    spawn_worktree_session "testing" "Run all tests continuously. Watch for changes. Report failures to .loki/state/test-results.json"
  fi

  # Spawn docs session
  if [ "$PARALLEL_DOCS" = "true" ] && [ -n "${WORKTREE_PATHS[docs]:-}" ]; then
    spawn_worktree_session "docs" "Monitor for DOCS_NEEDED signal. Update documentation for recent changes. Check git log."
  fi

  # Main orchestrator loop; the trap flips the flag AND cleans up so the
  # loop exits after the current iteration
  local running=true
  trap 'running=false; cleanup_parallel_streams' INT TERM

  while $running; do
    # Check for merge requests
    check_merge_queue

    # Check session health: forget sessions whose process has exited
    for stream in "${!WORKTREE_PIDS[@]}"; do
      local pid="${WORKTREE_PIDS[$stream]}"
      if ! kill -0 "$pid" 2>/dev/null; then
        log_warn "Session ended: $stream"
        unset WORKTREE_PIDS[$stream]
      fi
    done

    # Update orchestrator state
    local state_file="$TARGET_DIR/.loki/state/parallel-streams.json"
    mkdir -p "$(dirname "$state_file")"

    # The $(for ...) substitution renders one JSON entry per worktree; the
    # trailing `sed '$ s/,$//'` strips the final comma to keep the JSON valid.
    cat > "$state_file" << EOF
{
  "timestamp": "$(date -Iseconds)",
  "worktrees": {
$(for stream in "${!WORKTREE_PATHS[@]}"; do
  local path="${WORKTREE_PATHS[$stream]}"
  local pid="${WORKTREE_PIDS[$stream]:-null}"
  local status="stopped"
  if [ "$pid" != "null" ] && kill -0 "$pid" 2>/dev/null; then
    status="running"
  fi
  echo " \"$stream\": {\"path\": \"$path\", \"pid\": $pid, \"status\": \"$status\"},"
done | sed '$ s/,$//')
  },
  "active_sessions": ${#WORKTREE_PIDS[@]},
  "max_sessions": $MAX_PARALLEL_SESSIONS
}
EOF

    sleep 30
  done
}
|
|
1471
|
+
|
|
1472
|
+
#===============================================================================
|
|
1473
|
+
# Prerequisites Check
|
|
1474
|
+
#===============================================================================
|
|
1475
|
+
|
|
1476
|
+
#######################################
# Verify the host has every tool Loki Mode depends on.
# Required: claude, python3, git, curl — each miss is collected and reported.
# Optional: node, npm, jq — absence produces at most a warning.
# Outputs:  one log line per probe, then a summary block.
# Returns:  0 when no required tool is missing, 1 otherwise.
#######################################
check_prerequisites() {
    log_header "Checking Prerequisites"

    local absent=()
    local ver

    # Claude Code CLI — the agent runtime itself.
    log_step "Checking Claude Code CLI..."
    if ! command -v claude > /dev/null 2>&1; then
        absent+=("claude")
        log_error "Claude Code CLI not found"
        log_info "Install: https://claude.ai/code or npm install -g @anthropic-ai/claude-code"
    else
        ver=$(claude --version 2>/dev/null | head -1 || echo "unknown")
        log_info "Claude Code CLI: $ver"
    fi

    # Python 3 — used for all JSON parsing throughout the orchestrator.
    log_step "Checking Python 3..."
    if ! command -v python3 > /dev/null 2>&1; then
        absent+=("python3")
        log_error "Python 3 not found"
    else
        ver=$(python3 --version 2>&1)
        log_info "Python: $ver"
    fi

    # Git — required for worktrees and commits.
    log_step "Checking Git..."
    if ! command -v git > /dev/null 2>&1; then
        absent+=("git")
        log_error "Git not found"
    else
        ver=$(git --version)
        log_info "Git: $ver"
    fi

    # Node.js — optional; some project builds need it.
    log_step "Checking Node.js (optional)..."
    if command -v node > /dev/null 2>&1; then
        ver=$(node --version)
        log_info "Node.js: $ver"
    else
        log_warn "Node.js not found (optional, needed for some builds)"
    fi

    # npm — optional; intentionally silent when absent (no log_step either).
    if command -v npm > /dev/null 2>&1; then
        ver=$(npm --version)
        log_info "npm: $ver"
    fi

    # curl — required for web fetches.
    log_step "Checking curl..."
    if ! command -v curl > /dev/null 2>&1; then
        absent+=("curl")
        log_error "curl not found"
    else
        log_info "curl: available"
    fi

    # jq — optional convenience for JSON inspection.
    log_step "Checking jq (optional)..."
    if command -v jq > /dev/null 2>&1; then
        log_info "jq: available"
    else
        log_warn "jq not found (optional, for JSON parsing)"
    fi

    # Summary
    echo ""
    if (( ${#absent[@]} > 0 )); then
        log_error "Missing required tools: ${absent[*]}"
        log_info "Please install the missing tools and try again."
        return 1
    fi
    log_info "All required prerequisites are installed!"
    return 0
}
|
|
1555
|
+
|
|
1556
|
+
#===============================================================================
|
|
1557
|
+
# Skill Installation Check
|
|
1558
|
+
#===============================================================================
|
|
1559
|
+
|
|
1560
|
+
#######################################
# Locate the Loki Mode SKILL.md in its known install locations.
# Search order: user-level skill dir, project-level skill dir, then the
# package's own directory ($PROJECT_DIR/SKILL.md).
# Returns: 0 when found (logs the location), 1 when absent everywhere.
#######################################
check_skill_installed() {
    log_header "Checking Loki Mode Skill"

    local skill_locations=(
        "$HOME/.claude/skills/loki-mode/SKILL.md"
        ".claude/skills/loki-mode/SKILL.md"
        "$PROJECT_DIR/SKILL.md"
    )

    local loc
    for loc in "${skill_locations[@]}"; do
        if [ -f "$loc" ]; then
            log_info "Skill found: $loc"
            return 0
        fi
    done

    # BUG FIX: the original re-tested "$PROJECT_DIR/SKILL.md" here even though
    # the loop above already covered it (it is the third search location), so
    # that success branch was dead code and its "will be used from" message
    # was misleading. Reaching this point means the skill is not installed.
    log_warn "Loki Mode skill not found in standard locations"
    log_error "SKILL.md not found!"
    return 1
}
|
|
1587
|
+
|
|
1588
|
+
#===============================================================================
|
|
1589
|
+
# Initialize Loki Directory
|
|
1590
|
+
#===============================================================================
|
|
1591
|
+
|
|
1592
|
+
#######################################
# Create the .loki/ working directory tree and seed initial state files.
# Idempotent: existing queue files and orchestrator state are never clobbered.
# Globals read: PROJECT_DIR (for the VERSION file; falls back to "2.2.0").
# Side effects: creates .loki/** under the current working directory.
#######################################
init_loki_dir() {
    log_header "Initializing Loki Mode Directory"

    # Core directory layout. FIX: the original had a redundant
    # `mkdir -p .loki/queue` — .loki/queue is already created by the first
    # brace expansion below.
    mkdir -p .loki/{state,queue,messages,logs,config,prompts,artifacts,scripts}
    mkdir -p .loki/state/checkpoints
    mkdir -p .loki/artifacts/{releases,reports,backups}
    mkdir -p .loki/memory/{ledgers,handoffs,learnings,episodic,semantic,skills}
    mkdir -p .loki/metrics/{efficiency,rewards}
    mkdir -p .loki/rules
    mkdir -p .loki/signals

    # Seed each queue with an empty JSON array, preserving any existing data.
    # (`local` keeps the loop variable from leaking into the global scope.)
    local queue
    for queue in pending in-progress completed failed dead-letter; do
        if [ ! -f ".loki/queue/${queue}.json" ]; then
            echo "[]" > ".loki/queue/${queue}.json"
        fi
    done

    # Bootstrap orchestrator state on first run only.
    if [ ! -f ".loki/state/orchestrator.json" ]; then
        cat > ".loki/state/orchestrator.json" << EOF
{
  "version": "$(cat "$PROJECT_DIR/VERSION" 2>/dev/null || echo "2.2.0")",
  "currentPhase": "BOOTSTRAP",
  "startedAt": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
  "agents": {},
  "metrics": {
    "tasksCompleted": 0,
    "tasksFailed": 0,
    "retries": 0
  }
}
EOF
    fi

    log_info "Loki directory initialized: .loki/"
}
|
|
1630
|
+
|
|
1631
|
+
#===============================================================================
|
|
1632
|
+
# Task Status Monitor
|
|
1633
|
+
#===============================================================================
|
|
1634
|
+
|
|
1635
|
+
# Write a human-readable progress snapshot to .loki/STATUS.txt.
# Pulls the current phase from the orchestrator state file and counts the
# entries in each task queue; anything missing or unparseable counts as zero
# (or UNKNOWN for the phase). View live with: watch -n 2 cat .loki/STATUS.txt
update_status_file() {
    local status_file=".loki/STATUS.txt"

    # Current orchestrator phase (best-effort JSON read).
    local phase="UNKNOWN"
    if [ -f ".loki/state/orchestrator.json" ]; then
        phase=$(python3 -c "import json; print(json.load(open('.loki/state/orchestrator.json')).get('currentPhase', 'UNKNOWN'))" 2>/dev/null || echo "UNKNOWN")
    fi

    # Queue lengths; each queue file holds a JSON array of tasks.
    local n_pending=0
    local n_progress=0
    local n_completed=0
    local n_failed=0
    if [ -f ".loki/queue/pending.json" ]; then
        n_pending=$(python3 -c "import json; print(len(json.load(open('.loki/queue/pending.json'))))" 2>/dev/null || echo "0")
    fi
    if [ -f ".loki/queue/in-progress.json" ]; then
        n_progress=$(python3 -c "import json; print(len(json.load(open('.loki/queue/in-progress.json'))))" 2>/dev/null || echo "0")
    fi
    if [ -f ".loki/queue/completed.json" ]; then
        n_completed=$(python3 -c "import json; print(len(json.load(open('.loki/queue/completed.json'))))" 2>/dev/null || echo "0")
    fi
    if [ -f ".loki/queue/failed.json" ]; then
        n_failed=$(python3 -c "import json; print(len(json.load(open('.loki/queue/failed.json'))))" 2>/dev/null || echo "0")
    fi

    cat > "$status_file" << EOF
╔════════════════════════════════════════════════════════════════╗
║                        LOKI MODE STATUS                        ║
╚════════════════════════════════════════════════════════════════╝

Updated: $(date)

Phase: $phase

Tasks:
  ├─ Pending: $n_pending
  ├─ In Progress: $n_progress
  ├─ Completed: $n_completed
  └─ Failed: $n_failed

Monitor: watch -n 2 cat .loki/STATUS.txt
EOF
}
|
|
1670
|
+
|
|
1671
|
+
#===============================================================================
|
|
1672
|
+
# Dashboard State Writer (Real-time sync with web dashboard)
|
|
1673
|
+
#===============================================================================
|
|
1674
|
+
|
|
1675
|
+
#######################################
# Snapshot the full orchestrator state into .loki/dashboard-state.json for
# the web dashboard. Every input is read best-effort: missing or unparseable
# files fall back to safe defaults so the emitted JSON is always well-formed.
# Globals read: DETECTED_COMPLEXITY (defaults to "standard"),
#               ITERATION_COUNT (defaults to 0 — see bug-fix note below).
# Side effects: overwrites .loki/dashboard-state.json (.loki must exist).
#######################################
write_dashboard_state() {
    local output_file=".loki/dashboard-state.json"

    # Phase / version / start time / counters from the orchestrator state.
    local current_phase="BOOTSTRAP"
    local version="unknown"
    local started_at=""
    local tasks_completed=0
    local tasks_failed=0

    if [ -f ".loki/state/orchestrator.json" ]; then
        current_phase=$(python3 -c "import json; print(json.load(open('.loki/state/orchestrator.json')).get('currentPhase', 'BOOTSTRAP'))" 2>/dev/null || echo "BOOTSTRAP")
        version=$(python3 -c "import json; print(json.load(open('.loki/state/orchestrator.json')).get('version', 'unknown'))" 2>/dev/null || echo "unknown")
        started_at=$(python3 -c "import json; print(json.load(open('.loki/state/orchestrator.json')).get('startedAt', ''))" 2>/dev/null || echo "")
        tasks_completed=$(python3 -c "import json; print(json.load(open('.loki/state/orchestrator.json')).get('metrics', {}).get('tasksCompleted', 0))" 2>/dev/null || echo "0")
        tasks_failed=$(python3 -c "import json; print(json.load(open('.loki/state/orchestrator.json')).get('metrics', {}).get('tasksFailed', 0))" 2>/dev/null || echo "0")
    fi

    # Queue contents are embedded verbatim — each file is already a JSON array.
    local pending_tasks="[]"
    local in_progress_tasks="[]"
    local completed_tasks="[]"
    local failed_tasks="[]"
    local review_tasks="[]"

    [ -f ".loki/queue/pending.json" ] && pending_tasks=$(cat ".loki/queue/pending.json" 2>/dev/null || echo "[]")
    [ -f ".loki/queue/in-progress.json" ] && in_progress_tasks=$(cat ".loki/queue/in-progress.json" 2>/dev/null || echo "[]")
    [ -f ".loki/queue/completed.json" ] && completed_tasks=$(cat ".loki/queue/completed.json" 2>/dev/null || echo "[]")
    [ -f ".loki/queue/failed.json" ] && failed_tasks=$(cat ".loki/queue/failed.json" 2>/dev/null || echo "[]")
    [ -f ".loki/queue/review.json" ] && review_tasks=$(cat ".loki/queue/review.json" 2>/dev/null || echo "[]")

    # Agent roster (maintained elsewhere by update_agents_state).
    local agents="[]"
    [ -f ".loki/state/agents.json" ] && agents=$(cat ".loki/state/agents.json" 2>/dev/null || echo "[]")

    # Host resource usage, if the resource monitor has reported.
    local cpu_usage=0
    local mem_usage=0
    local resource_status="ok"

    if [ -f ".loki/state/resources.json" ]; then
        cpu_usage=$(python3 -c "import json; print(json.load(open('.loki/state/resources.json')).get('cpu', {}).get('usage_percent', 0))" 2>/dev/null || echo "0")
        mem_usage=$(python3 -c "import json; print(json.load(open('.loki/state/resources.json')).get('memory', {}).get('usage_percent', 0))" 2>/dev/null || echo "0")
        resource_status=$(python3 -c "import json; print(json.load(open('.loki/state/resources.json')).get('overall_status', 'ok'))" 2>/dev/null || echo "ok")
    fi

    # Human intervention signal files. NOTE(review): PAUSE takes precedence
    # over STOP when both exist — confirm that ordering is intended.
    local mode="autonomous"
    if [ -f ".loki/PAUSE" ]; then
        mode="paused"
    elif [ -f ".loki/STOP" ]; then
        mode="stopped"
    fi

    # Complexity tier detected at startup.
    local complexity="${DETECTED_COMPLEXITY:-standard}"

    # BUG FIX: ITERATION_COUNT may be unset (e.g. before the main loop has
    # started). Interpolating it bare emitted `"iteration": ,` — invalid
    # JSON — and $((ITERATION_COUNT % 4)) aborts under `set -u`. Default to 0.
    local iteration="${ITERATION_COUNT:-0}"

    # RARV cycle position, approximated from the iteration counter.
    local rarv_step=$((iteration % 4))
    local rarv_stages='["reason", "act", "reflect", "verify"]'

    # Memory-system sizes: one JSON file per record.
    local episodic_count=0
    local semantic_count=0
    local procedural_count=0

    [ -d ".loki/memory/episodic" ] && episodic_count=$(find ".loki/memory/episodic" -type f -name "*.json" 2>/dev/null | wc -l | tr -d ' ')
    [ -d ".loki/memory/semantic" ] && semantic_count=$(find ".loki/memory/semantic" -type f -name "*.json" 2>/dev/null | wc -l | tr -d ' ')
    [ -d ".loki/memory/skills" ] && procedural_count=$(find ".loki/memory/skills" -type f -name "*.json" 2>/dev/null | wc -l | tr -d ' ')

    # Quality gates: everything defaults to "pending" until a report exists.
    local quality_gates='{"staticAnalysis":"pending","codeReview":"pending","antiSycophancy":"pending","testCoverage":"pending","securityScan":"pending","performance":"pending"}'
    if [ -f ".loki/state/quality-gates.json" ]; then
        quality_gates=$(cat ".loki/state/quality-gates.json" 2>/dev/null || echo "$quality_gates")
    fi

    # Assemble the final document in a single write.
    cat > "$output_file" << EOF
{
  "timestamp": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
  "version": "$version",
  "mode": "$mode",
  "phase": "$current_phase",
  "complexity": "$complexity",
  "iteration": $iteration,
  "startedAt": "$started_at",
  "rarv": {
    "currentStep": $rarv_step,
    "stages": $rarv_stages
  },
  "tasks": {
    "pending": $pending_tasks,
    "inProgress": $in_progress_tasks,
    "review": $review_tasks,
    "completed": $completed_tasks,
    "failed": $failed_tasks
  },
  "agents": $agents,
  "metrics": {
    "tasksCompleted": $tasks_completed,
    "tasksFailed": $tasks_failed,
    "cpuUsage": $cpu_usage,
    "memoryUsage": $mem_usage,
    "resourceStatus": "$resource_status"
  },
  "memory": {
    "episodic": $episodic_count,
    "semantic": $semantic_count,
    "procedural": $procedural_count
  },
  "qualityGates": $quality_gates
}
EOF
}
|
|
1790
|
+
|
|
1791
|
+
# Launch the background loop that keeps STATUS.txt, the agents state, and the
# dashboard JSON fresh on a 2-second cadence. Records the loop's PID in the
# global STATUS_MONITOR_PID so stop_status_monitor can reap it later.
start_status_monitor() {
    log_step "Starting status monitor..."

    # Write one snapshot synchronously so consumers never see missing files.
    update_status_file
    update_agents_state
    write_dashboard_state

    # The refresh loop runs in an implicit background subshell; 2s keeps the
    # web dashboard near-realtime.
    while :; do
        update_status_file
        update_agents_state
        write_dashboard_state
        sleep 2
    done &
    STATUS_MONITOR_PID=$!

    log_info "Status monitor started"
    log_info "Monitor progress: ${CYAN}watch -n 2 cat .loki/STATUS.txt${NC}"
}
|
|
1813
|
+
|
|
1814
|
+
# Stop the background status monitor (if one was started) and the resource
# monitor. Safe to call repeatedly, or before start_status_monitor ever ran.
stop_status_monitor() {
    # FIX: ${VAR:-} guards against "unbound variable" under `set -u` when the
    # monitor was never started (the original read $STATUS_MONITOR_PID bare).
    if [ -n "${STATUS_MONITOR_PID:-}" ]; then
        kill "$STATUS_MONITOR_PID" 2>/dev/null || true
        wait "$STATUS_MONITOR_PID" 2>/dev/null || true
        # Clear the stale PID so a later call cannot signal a recycled PID.
        STATUS_MONITOR_PID=""
    fi
    stop_resource_monitor
}
|
|
1821
|
+
|
|
1822
|
+
#===============================================================================
|
|
1823
|
+
# Web Dashboard
|
|
1824
|
+
#===============================================================================
|
|
1825
|
+
|
|
1826
|
+
generate_dashboard() {
|
|
1827
|
+
# Copy dashboard from skill installation (v4.0.0 with Anthropic design language)
|
|
1828
|
+
local skill_dashboard="$SCRIPT_DIR/.loki/dashboard/index.html"
|
|
1829
|
+
if [ -f "$skill_dashboard" ]; then
|
|
1830
|
+
cp "$skill_dashboard" .loki/dashboard/index.html
|
|
1831
|
+
log_info "Dashboard copied from skill installation"
|
|
1832
|
+
return
|
|
1833
|
+
fi
|
|
1834
|
+
|
|
1835
|
+
# Fallback: Generate basic dashboard if external file not found
|
|
1836
|
+
cat > .loki/dashboard/index.html << 'DASHBOARD_HTML'
|
|
1837
|
+
<!DOCTYPE html>
|
|
1838
|
+
<html lang="en">
|
|
1839
|
+
<head>
|
|
1840
|
+
<meta charset="UTF-8">
|
|
1841
|
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
|
1842
|
+
<title>Loki Mode Dashboard</title>
|
|
1843
|
+
<style>
|
|
1844
|
+
* { box-sizing: border-box; margin: 0; padding: 0; }
|
|
1845
|
+
body {
|
|
1846
|
+
font-family: 'Söhne', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
|
|
1847
|
+
background: #FAF9F6;
|
|
1848
|
+
color: #1A1A1A;
|
|
1849
|
+
padding: 24px;
|
|
1850
|
+
min-height: 100vh;
|
|
1851
|
+
}
|
|
1852
|
+
.header {
|
|
1853
|
+
text-align: center;
|
|
1854
|
+
padding: 32px 20px;
|
|
1855
|
+
margin-bottom: 32px;
|
|
1856
|
+
}
|
|
1857
|
+
.header h1 {
|
|
1858
|
+
color: #D97757;
|
|
1859
|
+
font-size: 28px;
|
|
1860
|
+
font-weight: 600;
|
|
1861
|
+
letter-spacing: -0.5px;
|
|
1862
|
+
margin-bottom: 8px;
|
|
1863
|
+
}
|
|
1864
|
+
.header .subtitle {
|
|
1865
|
+
color: #666;
|
|
1866
|
+
font-size: 14px;
|
|
1867
|
+
font-weight: 400;
|
|
1868
|
+
}
|
|
1869
|
+
.header .phase {
|
|
1870
|
+
display: inline-block;
|
|
1871
|
+
margin-top: 16px;
|
|
1872
|
+
padding: 8px 16px;
|
|
1873
|
+
background: #FFF;
|
|
1874
|
+
border: 1px solid #E5E3DE;
|
|
1875
|
+
border-radius: 20px;
|
|
1876
|
+
font-size: 13px;
|
|
1877
|
+
color: #1A1A1A;
|
|
1878
|
+
font-weight: 500;
|
|
1879
|
+
}
|
|
1880
|
+
.stats {
|
|
1881
|
+
display: flex;
|
|
1882
|
+
justify-content: center;
|
|
1883
|
+
gap: 16px;
|
|
1884
|
+
margin-bottom: 40px;
|
|
1885
|
+
flex-wrap: wrap;
|
|
1886
|
+
}
|
|
1887
|
+
.stat {
|
|
1888
|
+
background: #FFF;
|
|
1889
|
+
border: 1px solid #E5E3DE;
|
|
1890
|
+
border-radius: 12px;
|
|
1891
|
+
padding: 20px 32px;
|
|
1892
|
+
text-align: center;
|
|
1893
|
+
min-width: 140px;
|
|
1894
|
+
transition: box-shadow 0.2s ease;
|
|
1895
|
+
}
|
|
1896
|
+
.stat:hover { box-shadow: 0 4px 12px rgba(0,0,0,0.06); }
|
|
1897
|
+
.stat .number { font-size: 36px; font-weight: 600; margin-bottom: 4px; }
|
|
1898
|
+
.stat .label { font-size: 12px; color: #888; text-transform: uppercase; letter-spacing: 0.5px; }
|
|
1899
|
+
.stat.pending .number { color: #D97757; }
|
|
1900
|
+
.stat.progress .number { color: #5B8DEF; }
|
|
1901
|
+
.stat.completed .number { color: #2E9E6E; }
|
|
1902
|
+
.stat.failed .number { color: #D44F4F; }
|
|
1903
|
+
.stat.agents .number { color: #9B6DD6; }
|
|
1904
|
+
.section-header {
|
|
1905
|
+
text-align: center;
|
|
1906
|
+
font-size: 16px;
|
|
1907
|
+
font-weight: 600;
|
|
1908
|
+
color: #666;
|
|
1909
|
+
margin: 40px 0 20px 0;
|
|
1910
|
+
text-transform: uppercase;
|
|
1911
|
+
letter-spacing: 1px;
|
|
1912
|
+
}
|
|
1913
|
+
.agents-grid {
|
|
1914
|
+
display: grid;
|
|
1915
|
+
grid-template-columns: repeat(auto-fill, minmax(320px, 1fr));
|
|
1916
|
+
gap: 16px;
|
|
1917
|
+
max-width: 1400px;
|
|
1918
|
+
margin: 0 auto 40px auto;
|
|
1919
|
+
}
|
|
1920
|
+
.agent-card {
|
|
1921
|
+
background: #FFF;
|
|
1922
|
+
border: 1px solid #E5E3DE;
|
|
1923
|
+
border-radius: 12px;
|
|
1924
|
+
padding: 16px;
|
|
1925
|
+
transition: box-shadow 0.2s ease, border-color 0.2s ease;
|
|
1926
|
+
}
|
|
1927
|
+
.agent-card:hover {
|
|
1928
|
+
box-shadow: 0 4px 12px rgba(0,0,0,0.06);
|
|
1929
|
+
border-color: #9B6DD6;
|
|
1930
|
+
}
|
|
1931
|
+
.agent-card .agent-header {
|
|
1932
|
+
display: flex;
|
|
1933
|
+
justify-content: space-between;
|
|
1934
|
+
align-items: flex-start;
|
|
1935
|
+
margin-bottom: 12px;
|
|
1936
|
+
}
|
|
1937
|
+
.agent-card .agent-id {
|
|
1938
|
+
font-size: 11px;
|
|
1939
|
+
color: #999;
|
|
1940
|
+
font-family: monospace;
|
|
1941
|
+
}
|
|
1942
|
+
.agent-card .model-badge {
|
|
1943
|
+
padding: 4px 10px;
|
|
1944
|
+
border-radius: 6px;
|
|
1945
|
+
font-size: 10px;
|
|
1946
|
+
font-weight: 600;
|
|
1947
|
+
text-transform: uppercase;
|
|
1948
|
+
letter-spacing: 0.5px;
|
|
1949
|
+
}
|
|
1950
|
+
.agent-card .model-badge.sonnet {
|
|
1951
|
+
background: #E8F0FD;
|
|
1952
|
+
color: #5B8DEF;
|
|
1953
|
+
}
|
|
1954
|
+
.agent-card .model-badge.haiku {
|
|
1955
|
+
background: #FFF4E6;
|
|
1956
|
+
color: #F59E0B;
|
|
1957
|
+
}
|
|
1958
|
+
.agent-card .model-badge.opus {
|
|
1959
|
+
background: #F3E8FF;
|
|
1960
|
+
color: #9B6DD6;
|
|
1961
|
+
}
|
|
1962
|
+
.agent-card .agent-type {
|
|
1963
|
+
font-size: 14px;
|
|
1964
|
+
font-weight: 600;
|
|
1965
|
+
color: #1A1A1A;
|
|
1966
|
+
margin-bottom: 8px;
|
|
1967
|
+
}
|
|
1968
|
+
.agent-card .agent-status {
|
|
1969
|
+
display: inline-block;
|
|
1970
|
+
padding: 3px 8px;
|
|
1971
|
+
border-radius: 4px;
|
|
1972
|
+
font-size: 10px;
|
|
1973
|
+
font-weight: 500;
|
|
1974
|
+
margin-bottom: 12px;
|
|
1975
|
+
}
|
|
1976
|
+
.agent-card .agent-status.active {
|
|
1977
|
+
background: #E6F5EE;
|
|
1978
|
+
color: #2E9E6E;
|
|
1979
|
+
}
|
|
1980
|
+
.agent-card .agent-status.completed {
|
|
1981
|
+
background: #F0EFEA;
|
|
1982
|
+
color: #666;
|
|
1983
|
+
}
|
|
1984
|
+
.agent-card .agent-work {
|
|
1985
|
+
font-size: 12px;
|
|
1986
|
+
color: #666;
|
|
1987
|
+
line-height: 1.5;
|
|
1988
|
+
margin-bottom: 8px;
|
|
1989
|
+
}
|
|
1990
|
+
.agent-card .agent-meta {
|
|
1991
|
+
display: flex;
|
|
1992
|
+
gap: 12px;
|
|
1993
|
+
font-size: 11px;
|
|
1994
|
+
color: #999;
|
|
1995
|
+
margin-top: 8px;
|
|
1996
|
+
padding-top: 8px;
|
|
1997
|
+
border-top: 1px solid #F0EFEA;
|
|
1998
|
+
}
|
|
1999
|
+
.agent-card .agent-meta span {
|
|
2000
|
+
display: flex;
|
|
2001
|
+
align-items: center;
|
|
2002
|
+
gap: 4px;
|
|
2003
|
+
}
|
|
2004
|
+
.columns {
|
|
2005
|
+
display: flex;
|
|
2006
|
+
gap: 20px;
|
|
2007
|
+
overflow-x: auto;
|
|
2008
|
+
padding-bottom: 24px;
|
|
2009
|
+
max-width: 1400px;
|
|
2010
|
+
margin: 0 auto;
|
|
2011
|
+
}
|
|
2012
|
+
.column {
|
|
2013
|
+
flex: 1;
|
|
2014
|
+
min-width: 300px;
|
|
2015
|
+
max-width: 350px;
|
|
2016
|
+
background: #FFF;
|
|
2017
|
+
border: 1px solid #E5E3DE;
|
|
2018
|
+
border-radius: 12px;
|
|
2019
|
+
padding: 20px;
|
|
2020
|
+
}
|
|
2021
|
+
.column h2 {
|
|
2022
|
+
font-size: 13px;
|
|
2023
|
+
font-weight: 600;
|
|
2024
|
+
color: #666;
|
|
2025
|
+
margin-bottom: 16px;
|
|
2026
|
+
display: flex;
|
|
2027
|
+
align-items: center;
|
|
2028
|
+
gap: 10px;
|
|
2029
|
+
text-transform: uppercase;
|
|
2030
|
+
letter-spacing: 0.5px;
|
|
2031
|
+
}
|
|
2032
|
+
.column h2 .count {
|
|
2033
|
+
background: #F0EFEA;
|
|
2034
|
+
padding: 3px 10px;
|
|
2035
|
+
border-radius: 12px;
|
|
2036
|
+
font-size: 11px;
|
|
2037
|
+
color: #1A1A1A;
|
|
2038
|
+
}
|
|
2039
|
+
.column.pending h2 .count { background: #FCEEE8; color: #D97757; }
|
|
2040
|
+
.column.progress h2 .count { background: #E8F0FD; color: #5B8DEF; }
|
|
2041
|
+
.column.completed h2 .count { background: #E6F5EE; color: #2E9E6E; }
|
|
2042
|
+
.column.failed h2 .count { background: #FCE8E8; color: #D44F4F; }
|
|
2043
|
+
.task {
|
|
2044
|
+
background: #FAF9F6;
|
|
2045
|
+
border: 1px solid #E5E3DE;
|
|
2046
|
+
border-radius: 8px;
|
|
2047
|
+
padding: 14px;
|
|
2048
|
+
margin-bottom: 12px;
|
|
2049
|
+
transition: border-color 0.2s ease;
|
|
2050
|
+
}
|
|
2051
|
+
.task:hover { border-color: #D97757; }
|
|
2052
|
+
.task .id { font-size: 10px; color: #999; margin-bottom: 6px; font-family: monospace; }
|
|
2053
|
+
.task .type {
|
|
2054
|
+
display: inline-block;
|
|
2055
|
+
background: #FCEEE8;
|
|
2056
|
+
color: #D97757;
|
|
2057
|
+
padding: 3px 10px;
|
|
2058
|
+
border-radius: 4px;
|
|
2059
|
+
font-size: 11px;
|
|
2060
|
+
font-weight: 500;
|
|
2061
|
+
margin-bottom: 8px;
|
|
2062
|
+
}
|
|
2063
|
+
.task .title { font-size: 13px; color: #1A1A1A; line-height: 1.5; }
|
|
2064
|
+
.task .error {
|
|
2065
|
+
font-size: 11px;
|
|
2066
|
+
color: #D44F4F;
|
|
2067
|
+
margin-top: 10px;
|
|
2068
|
+
padding: 10px;
|
|
2069
|
+
background: #FCE8E8;
|
|
2070
|
+
border-radius: 6px;
|
|
2071
|
+
font-family: monospace;
|
|
2072
|
+
}
|
|
2073
|
+
.refresh {
|
|
2074
|
+
position: fixed;
|
|
2075
|
+
bottom: 24px;
|
|
2076
|
+
right: 24px;
|
|
2077
|
+
background: #D97757;
|
|
2078
|
+
color: white;
|
|
2079
|
+
border: none;
|
|
2080
|
+
padding: 12px 24px;
|
|
2081
|
+
border-radius: 8px;
|
|
2082
|
+
cursor: pointer;
|
|
2083
|
+
font-size: 14px;
|
|
2084
|
+
font-weight: 500;
|
|
2085
|
+
transition: background 0.2s ease;
|
|
2086
|
+
box-shadow: 0 4px 12px rgba(217, 119, 87, 0.3);
|
|
2087
|
+
}
|
|
2088
|
+
.refresh:hover { background: #C56747; }
|
|
2089
|
+
.updated {
|
|
2090
|
+
text-align: center;
|
|
2091
|
+
color: #999;
|
|
2092
|
+
font-size: 12px;
|
|
2093
|
+
margin-top: 24px;
|
|
2094
|
+
}
|
|
2095
|
+
.empty {
|
|
2096
|
+
color: #999;
|
|
2097
|
+
font-size: 13px;
|
|
2098
|
+
text-align: center;
|
|
2099
|
+
padding: 24px;
|
|
2100
|
+
font-style: italic;
|
|
2101
|
+
}
|
|
2102
|
+
.powered-by {
|
|
2103
|
+
text-align: center;
|
|
2104
|
+
margin-top: 40px;
|
|
2105
|
+
padding-top: 24px;
|
|
2106
|
+
border-top: 1px solid #E5E3DE;
|
|
2107
|
+
color: #999;
|
|
2108
|
+
font-size: 12px;
|
|
2109
|
+
}
|
|
2110
|
+
.powered-by span { color: #D97757; font-weight: 500; }
|
|
2111
|
+
</style>
|
|
2112
|
+
</head>
|
|
2113
|
+
<body>
|
|
2114
|
+
<div class="header">
|
|
2115
|
+
<h1>LOKI MODE</h1>
|
|
2116
|
+
<div class="subtitle">Autonomous Multi-Agent Startup System</div>
|
|
2117
|
+
<div class="phase" id="phase">Loading...</div>
|
|
2118
|
+
</div>
|
|
2119
|
+
<div class="stats">
|
|
2120
|
+
<div class="stat agents"><div class="number" id="agents-count">-</div><div class="label">Active Agents</div></div>
|
|
2121
|
+
<div class="stat pending"><div class="number" id="pending-count">-</div><div class="label">Pending</div></div>
|
|
2122
|
+
<div class="stat progress"><div class="number" id="progress-count">-</div><div class="label">In Progress</div></div>
|
|
2123
|
+
<div class="stat completed"><div class="number" id="completed-count">-</div><div class="label">Completed</div></div>
|
|
2124
|
+
<div class="stat failed"><div class="number" id="failed-count">-</div><div class="label">Failed</div></div>
|
|
2125
|
+
</div>
|
|
2126
|
+
<div class="section-header">Active Agents</div>
|
|
2127
|
+
<div class="agents-grid" id="agents-grid"></div>
|
|
2128
|
+
<div class="section-header">Task Queue</div>
|
|
2129
|
+
<div class="columns">
|
|
2130
|
+
<div class="column pending"><h2>Pending <span class="count" id="pending-badge">0</span></h2><div id="pending-tasks"></div></div>
|
|
2131
|
+
<div class="column progress"><h2>In Progress <span class="count" id="progress-badge">0</span></h2><div id="progress-tasks"></div></div>
|
|
2132
|
+
<div class="column completed"><h2>Completed <span class="count" id="completed-badge">0</span></h2><div id="completed-tasks"></div></div>
|
|
2133
|
+
<div class="column failed"><h2>Failed <span class="count" id="failed-badge">0</span></h2><div id="failed-tasks"></div></div>
|
|
2134
|
+
</div>
|
|
2135
|
+
<div class="updated" id="updated">Last updated: -</div>
|
|
2136
|
+
<div class="powered-by">Powered by <span>Claude</span></div>
|
|
2137
|
+
<button class="refresh" onclick="loadData()">Refresh</button>
|
|
2138
|
+
<script>
|
|
2139
|
+
async function loadJSON(path) {
|
|
2140
|
+
try {
|
|
2141
|
+
const res = await fetch(path + '?t=' + Date.now());
|
|
2142
|
+
if (!res.ok) return [];
|
|
2143
|
+
const text = await res.text();
|
|
2144
|
+
if (!text.trim()) return [];
|
|
2145
|
+
const data = JSON.parse(text);
|
|
2146
|
+
return Array.isArray(data) ? data : (data.tasks || data.agents || []);
|
|
2147
|
+
} catch { return []; }
|
|
2148
|
+
}
|
|
2149
|
+
function getModelClass(model) {
|
|
2150
|
+
if (!model) return 'sonnet';
|
|
2151
|
+
const m = model.toLowerCase();
|
|
2152
|
+
if (m.includes('haiku')) return 'haiku';
|
|
2153
|
+
if (m.includes('opus')) return 'opus';
|
|
2154
|
+
return 'sonnet';
|
|
2155
|
+
}
|
|
2156
|
+
function formatDuration(isoDate) {
|
|
2157
|
+
if (!isoDate) return 'Unknown';
|
|
2158
|
+
const start = new Date(isoDate);
|
|
2159
|
+
const now = new Date();
|
|
2160
|
+
const seconds = Math.floor((now - start) / 1000);
|
|
2161
|
+
if (seconds < 60) return seconds + 's';
|
|
2162
|
+
if (seconds < 3600) return Math.floor(seconds / 60) + 'm';
|
|
2163
|
+
return Math.floor(seconds / 3600) + 'h ' + Math.floor((seconds % 3600) / 60) + 'm';
|
|
2164
|
+
}
|
|
2165
|
+
function renderAgent(agent) {
|
|
2166
|
+
const modelClass = getModelClass(agent.model);
|
|
2167
|
+
const modelName = agent.model || 'Sonnet 4.5';
|
|
2168
|
+
const agentType = agent.agent_type || 'general-purpose';
|
|
2169
|
+
const status = agent.status === 'completed' ? 'completed' : 'active';
|
|
2170
|
+
const currentTask = agent.current_task || (agent.tasks_completed && agent.tasks_completed.length > 0
|
|
2171
|
+
? 'Completed: ' + agent.tasks_completed.join(', ')
|
|
2172
|
+
: 'Initializing...');
|
|
2173
|
+
const duration = formatDuration(agent.spawned_at);
|
|
2174
|
+
const tasksCount = agent.tasks_completed ? agent.tasks_completed.length : 0;
|
|
2175
|
+
|
|
2176
|
+
return `
|
|
2177
|
+
<div class="agent-card">
|
|
2178
|
+
<div class="agent-header">
|
|
2179
|
+
<div class="agent-id">${agent.agent_id || 'Unknown'}</div>
|
|
2180
|
+
<div class="model-badge ${modelClass}">${modelName}</div>
|
|
2181
|
+
</div>
|
|
2182
|
+
<div class="agent-type">${agentType}</div>
|
|
2183
|
+
<div class="agent-status ${status}">${status}</div>
|
|
2184
|
+
<div class="agent-work">${currentTask}</div>
|
|
2185
|
+
<div class="agent-meta">
|
|
2186
|
+
<span>⏱ ${duration}</span>
|
|
2187
|
+
<span>✓ ${tasksCount} tasks</span>
|
|
2188
|
+
</div>
|
|
2189
|
+
</div>
|
|
2190
|
+
`;
|
|
2191
|
+
}
|
|
2192
|
+
function renderTask(task) {
|
|
2193
|
+
const payload = task.payload || {};
|
|
2194
|
+
const title = payload.description || payload.action || task.type || 'Task';
|
|
2195
|
+
const error = task.lastError ? `<div class="error">${task.lastError}</div>` : '';
|
|
2196
|
+
return `<div class="task"><div class="id">${task.id}</div><span class="type">${task.type || 'general'}</span><div class="title">${title}</div>${error}</div>`;
|
|
2197
|
+
}
|
|
2198
|
+
async function loadData() {
|
|
2199
|
+
const [pending, progress, completed, failed, agents] = await Promise.all([
|
|
2200
|
+
loadJSON('../queue/pending.json'),
|
|
2201
|
+
loadJSON('../queue/in-progress.json'),
|
|
2202
|
+
loadJSON('../queue/completed.json'),
|
|
2203
|
+
loadJSON('../queue/failed.json'),
|
|
2204
|
+
loadJSON('../state/agents.json')
|
|
2205
|
+
]);
|
|
2206
|
+
|
|
2207
|
+
// Agent stats
|
|
2208
|
+
document.getElementById('agents-count').textContent = agents.length;
|
|
2209
|
+
document.getElementById('agents-grid').innerHTML = agents.length
|
|
2210
|
+
? agents.map(renderAgent).join('')
|
|
2211
|
+
: '<div class="empty">No active agents</div>';
|
|
2212
|
+
|
|
2213
|
+
// Task stats
|
|
2214
|
+
document.getElementById('pending-count').textContent = pending.length;
|
|
2215
|
+
document.getElementById('progress-count').textContent = progress.length;
|
|
2216
|
+
document.getElementById('completed-count').textContent = completed.length;
|
|
2217
|
+
document.getElementById('failed-count').textContent = failed.length;
|
|
2218
|
+
document.getElementById('pending-badge').textContent = pending.length;
|
|
2219
|
+
document.getElementById('progress-badge').textContent = progress.length;
|
|
2220
|
+
document.getElementById('completed-badge').textContent = completed.length;
|
|
2221
|
+
document.getElementById('failed-badge').textContent = failed.length;
|
|
2222
|
+
document.getElementById('pending-tasks').innerHTML = pending.length ? pending.map(renderTask).join('') : '<div class="empty">No pending tasks</div>';
|
|
2223
|
+
document.getElementById('progress-tasks').innerHTML = progress.length ? progress.map(renderTask).join('') : '<div class="empty">No tasks in progress</div>';
|
|
2224
|
+
document.getElementById('completed-tasks').innerHTML = completed.length ? completed.slice(-10).reverse().map(renderTask).join('') : '<div class="empty">No completed tasks</div>';
|
|
2225
|
+
document.getElementById('failed-tasks').innerHTML = failed.length ? failed.map(renderTask).join('') : '<div class="empty">No failed tasks</div>';
|
|
2226
|
+
|
|
2227
|
+
try {
|
|
2228
|
+
const state = await fetch('../state/orchestrator.json?t=' + Date.now()).then(r => r.json());
|
|
2229
|
+
document.getElementById('phase').textContent = 'Phase: ' + (state.currentPhase || 'UNKNOWN');
|
|
2230
|
+
} catch { document.getElementById('phase').textContent = 'Phase: UNKNOWN'; }
|
|
2231
|
+
document.getElementById('updated').textContent = 'Last updated: ' + new Date().toLocaleTimeString();
|
|
2232
|
+
}
|
|
2233
|
+
loadData();
|
|
2234
|
+
setInterval(loadData, 3000);
|
|
2235
|
+
</script>
|
|
2236
|
+
</body>
|
|
2237
|
+
</html>
|
|
2238
|
+
DASHBOARD_HTML
|
|
2239
|
+
}
|
|
2240
|
+
|
|
2241
|
+
update_agents_state() {
    # Collect every per-agent JSON file from .agent/sub-agents/ into a single
    # JSON array at .loki/state/agents.json for the dashboard to consume.
    local src_dir=".agent/sub-agents"
    local dest=".loki/state/agents.json"

    # No sub-agent directory yet: publish an empty array and stop.
    if [ ! -d "$src_dir" ]; then
        echo "[]" > "$dest"
        return
    fi

    # Join file contents with commas; each file is trusted to hold one JSON
    # object, so plain concatenation yields a valid array.
    local joined=""
    local f contents
    for f in "$src_dir"/*.json; do
        [ -e "$f" ] || continue          # glob matched nothing
        contents=$(cat "$f" 2>/dev/null)
        [ -n "$contents" ] || continue   # skip empty/unreadable files
        if [ -n "$joined" ]; then
            joined="${joined},${contents}"
        else
            joined="$contents"
        fi
    done

    echo "[${joined}]" > "$dest"
}
|
|
2278
|
+
|
|
2279
|
+
#===============================================================================
|
|
2280
|
+
# Resource Monitoring
|
|
2281
|
+
#===============================================================================
|
|
2282
|
+
|
|
2283
|
+
check_system_resources() {
    # Sample system-wide CPU and memory utilisation, compare each against its
    # configured integer-percentage threshold (RESOURCE_CPU_THRESHOLD /
    # RESOURCE_MEM_THRESHOLD), and write a JSON report to
    # .loki/state/resources.json. Logs a warning when either is high.
    local output_file=".loki/state/resources.json"

    # Get CPU usage (average across all cores)
    local cpu_usage=0
    if [[ "$OSTYPE" == "darwin"* ]]; then
        # macOS: take the second `top` sample (the first is unreliable), pull
        # the "idle" field, and compute usage = 100 - idle.
        local idle=$(top -l 2 -n 0 | grep "CPU usage" | tail -1 | awk -F'[:,]' '{for(i=1;i<=NF;i++) if($i ~ /idle/) print $(i)}' | awk '{print int($1)}')
        cpu_usage=$((100 - ${idle:-0}))
    elif [[ "$OSTYPE" == "linux-gnu"* ]]; then
        # Linux: second sample from batch-mode top, extract the "% id" figure.
        cpu_usage=$(top -bn2 | grep "Cpu(s)" | tail -1 | sed "s/.*, *\([0-9.]*\)%* id.*/\1/" | awk '{print int(100 - $1)}')
    else
        # Unknown platform: report 0 rather than fail.
        cpu_usage=0
    fi

    # Get memory usage
    local mem_usage=0
    if [[ "$OSTYPE" == "darwin"* ]]; then
        # macOS: derive used/total page counts from vm_stat.
        # NOTE(review): page_size is captured but never used, and "used"
        # counts only active+wired pages, which understates usage relative to
        # Activity Monitor — confirm this is intentional.
        local page_size=$(pagesize)
        local vm_stat=$(vm_stat)
        local pages_free=$(echo "$vm_stat" | awk '/Pages free/ {print $3}' | tr -d '.')
        local pages_active=$(echo "$vm_stat" | awk '/Pages active/ {print $3}' | tr -d '.')
        local pages_inactive=$(echo "$vm_stat" | awk '/Pages inactive/ {print $3}' | tr -d '.')
        local pages_speculative=$(echo "$vm_stat" | awk '/Pages speculative/ {print $3}' | tr -d '.')
        local pages_wired=$(echo "$vm_stat" | awk '/Pages wired down/ {print $4}' | tr -d '.')

        local total_pages=$((pages_free + pages_active + pages_inactive + pages_speculative + pages_wired))
        local used_pages=$((pages_active + pages_wired))
        mem_usage=$((used_pages * 100 / total_pages))
    elif [[ "$OSTYPE" == "linux-gnu"* ]]; then
        # Linux: used/total ratio from free(1).
        mem_usage=$(free | grep Mem | awk '{print int($3/$2 * 100)}')
    else
        mem_usage=0
    fi

    # Determine status
    local cpu_status="ok"
    local mem_status="ok"
    local overall_status="ok"
    local warning_message=""

    if [ "$cpu_usage" -ge "$RESOURCE_CPU_THRESHOLD" ]; then
        cpu_status="high"
        overall_status="warning"
        warning_message="CPU usage is ${cpu_usage}% (threshold: ${RESOURCE_CPU_THRESHOLD}%). Consider reducing parallel agent count or pausing non-critical tasks."
    fi

    if [ "$mem_usage" -ge "$RESOURCE_MEM_THRESHOLD" ]; then
        mem_status="high"
        overall_status="warning"
        # Append to an existing CPU warning rather than overwrite it.
        if [ -n "$warning_message" ]; then
            warning_message="${warning_message} Memory usage is ${mem_usage}% (threshold: ${RESOURCE_MEM_THRESHOLD}%)."
        else
            warning_message="Memory usage is ${mem_usage}% (threshold: ${RESOURCE_MEM_THRESHOLD}%). Consider reducing parallel agent count or cleaning up resources."
        fi
    fi

    # Write JSON status
    cat > "$output_file" << EOF
{
  "timestamp": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
  "cpu": {
    "usage_percent": $cpu_usage,
    "threshold_percent": $RESOURCE_CPU_THRESHOLD,
    "status": "$cpu_status"
  },
  "memory": {
    "usage_percent": $mem_usage,
    "threshold_percent": $RESOURCE_MEM_THRESHOLD,
    "status": "$mem_status"
  },
  "overall_status": "$overall_status",
  "warning_message": "$warning_message"
}
EOF

    # Log warning if resources are high
    if [ "$overall_status" = "warning" ]; then
        log_warn "RESOURCE WARNING: $warning_message"
    fi
}
|
|
2368
|
+
|
|
2369
|
+
start_resource_monitor() {
    # Launch a background loop that re-samples system resources every
    # RESOURCE_CHECK_INTERVAL seconds; the loop subshell's PID is stored in
    # RESOURCE_MONITOR_PID for stop_resource_monitor.
    log_step "Starting resource monitor (checks every ${RESOURCE_CHECK_INTERVAL}s)..."

    # Take one sample immediately so resources.json exists right away.
    check_system_resources

    # Periodic re-sampling loop in a subshell.
    (
        while :; do
            sleep "$RESOURCE_CHECK_INTERVAL"
            check_system_resources
        done
    ) &
    RESOURCE_MONITOR_PID=$!

    log_info "Resource monitor started (CPU threshold: ${RESOURCE_CPU_THRESHOLD}%, Memory threshold: ${RESOURCE_MEM_THRESHOLD}%)"
    log_info "Check status: ${CYAN}cat .loki/state/resources.json${NC}"
}
|
|
2387
|
+
|
|
2388
|
+
stop_resource_monitor() {
    # Terminate the background resource monitor, if one was started.
    [ -n "$RESOURCE_MONITOR_PID" ] || return 0
    kill "$RESOURCE_MONITOR_PID" 2>/dev/null || true
    wait "$RESOURCE_MONITOR_PID" 2>/dev/null || true
}
|
|
2394
|
+
|
|
2395
|
+
#===============================================================================
|
|
2396
|
+
# Audit Logging (Enterprise Security)
|
|
2397
|
+
#===============================================================================
|
|
2398
|
+
|
|
2399
|
+
audit_log() {
    # Append a security-relevant event to the daily JSONL audit log
    # (.loki/logs/audit-YYYYMMDD.jsonl) for enterprise compliance.
    # No-op unless AUDIT_LOG_ENABLED=true.
    # $1 - event type label
    # $2 - free-form event detail string
    local event_type="$1"
    local event_data="$2"
    local audit_file=".loki/logs/audit-$(date +%Y%m%d).jsonl"

    if [ "$AUDIT_LOG_ENABLED" != "true" ]; then
        return
    fi

    mkdir -p .loki/logs

    # Escape backslashes and double quotes so arbitrary event text cannot
    # break the JSON structure of the log line (previously raw interpolation
    # produced invalid JSON whenever the data contained a quote).
    local et="${event_type//\\/\\\\}"; et="${et//\"/\\\"}"
    local ed="${event_data//\\/\\\\}"; ed="${ed//\"/\\\"}"

    printf '{"timestamp":"%s","event":"%s","data":"%s","user":"%s","pid":%s}\n' \
        "$(date -u +%Y-%m-%dT%H:%M:%SZ)" "$et" "$ed" "$(whoami)" "$$" >> "$audit_file"
}
|
|
2417
|
+
|
|
2418
|
+
check_staged_autonomy() {
    # Gate execution behind human approval when STAGED_AUTONOMY=true: announce
    # the plan location, then block until the operator creates the
    # .loki/signals/PLAN_APPROVED marker file. No-op otherwise.
    # $1 - path to the plan file awaiting review
    # NOTE(review): this polls forever with no timeout; the wrapper hangs
    # until the approval file appears.
    local plan_file="$1"

    if [ "$STAGED_AUTONOMY" != "true" ]; then
        return 0
    fi

    log_info "STAGED AUTONOMY: Waiting for plan approval..."
    log_info "Review plan at: $plan_file"
    log_info "Create .loki/signals/PLAN_APPROVED to continue"

    audit_log "STAGED_AUTONOMY_WAIT" "plan=$plan_file"

    # Poll every 5 seconds for the operator's approval marker.
    while [ ! -f ".loki/signals/PLAN_APPROVED" ]; do
        sleep 5
    done

    # Consume the signal so the next staged gate waits again.
    rm -f ".loki/signals/PLAN_APPROVED"
    audit_log "STAGED_AUTONOMY_APPROVED" "plan=$plan_file"
    log_success "Plan approved, continuing execution..."
}
|
|
2441
|
+
|
|
2442
|
+
check_command_allowed() {
    # Deny-list gate: return 1 (blocking) when the candidate command contains
    # any substring from the comma-separated BLOCKED_COMMANDS list, auditing
    # and reporting each rejection. Returns 0 when the command is allowed.
    local command="$1"
    local blocked

    IFS=',' read -ra BLOCKED_ARRAY <<< "$BLOCKED_COMMANDS"
    for blocked in "${BLOCKED_ARRAY[@]}"; do
        case "$command" in
            *"$blocked"*)
                audit_log "BLOCKED_COMMAND" "command=$command,pattern=$blocked"
                log_error "SECURITY: Blocked dangerous command: $command"
                return 1
                ;;
        esac
    done

    return 0
}
|
|
2457
|
+
|
|
2458
|
+
#===============================================================================
|
|
2459
|
+
# Cross-Project Learnings Database
|
|
2460
|
+
#===============================================================================
|
|
2461
|
+
|
|
2462
|
+
init_learnings_db() {
    # Create ~/.loki/learnings and seed the three JSONL databases
    # (patterns / mistakes / successes) with a version header, if absent.
    # The three copy-pasted stanzas are collapsed into one loop.
    local learnings_dir="${HOME}/.loki/learnings"
    mkdir -p "$learnings_dir"

    local db
    for db in patterns mistakes successes; do
        # Seed each file only once so existing learnings are never clobbered.
        if [ ! -f "$learnings_dir/$db.jsonl" ]; then
            echo '{"version":"1.0","created":"'"$(date -u +%Y-%m-%dT%H:%M:%SZ)"'"}' > "$learnings_dir/$db.jsonl"
        fi
    done

    log_info "Learnings database initialized at: $learnings_dir"
}
|
|
2482
|
+
|
|
2483
|
+
save_learning() {
    # Append one learning entry to the cross-project database.
    # $1 - learning type: pattern | mistake | success (selects <type>s.jsonl)
    # $2 - category label
    # $3 - human-readable description
    # $4 - project name (defaults to the current directory's basename)
    local learning_type="$1"
    local category="$2"
    local description="$3"
    local project="${4:-$(basename "$(pwd)")}"

    local learnings_dir="${HOME}/.loki/learnings"
    local target_file="$learnings_dir/${learning_type}s.jsonl"

    if [ ! -d "$learnings_dir" ]; then
        init_learnings_db
    fi

    # Escape backslashes and double quotes so free-form text cannot corrupt
    # the JSONL (raw interpolation previously produced invalid JSON whenever
    # the description contained a quote).
    local p="${project//\\/\\\\}"; p="${p//\"/\\\"}"
    local c="${category//\\/\\\\}"; c="${c//\"/\\\"}"
    local d="${description//\\/\\\\}"; d="${d//\"/\\\"}"

    printf '{"timestamp":"%s","project":"%s","category":"%s","description":"%s"}\n' \
        "$(date -u +%Y-%m-%dT%H:%M:%SZ)" "$p" "$c" "$d" >> "$target_file"
    log_info "Saved $learning_type: $category"
}
|
|
2504
|
+
|
|
2505
|
+
get_relevant_learnings() {
    # Score the global learnings database against a free-text context and
    # write the best-matching patterns/mistakes/successes (up to 5 each) to
    # .loki/state/relevant-learnings.json for prompt injection.
    # $1 - context string (e.g. the upcoming task description)
    local context="$1"
    local learnings_dir="${HOME}/.loki/learnings"
    local output_file=".loki/state/relevant-learnings.json"

    # No global database yet: publish an empty result and stop.
    if [ ! -d "$learnings_dir" ]; then
        echo '{"patterns":[],"mistakes":[],"successes":[]}' > "$output_file"
        return
    fi

    # Simple grep-based relevance (can be enhanced with embeddings)
    # Pass context via environment variable to avoid quote escaping issues
    export LOKI_CONTEXT="$context"
    python3 << 'LEARNINGS_SCRIPT'
import json
import os

learnings_dir = os.path.expanduser("~/.loki/learnings")
context = os.environ.get("LOKI_CONTEXT", "").lower()

def load_jsonl(filepath):
    # Best-effort JSONL reader: skips unparseable lines and entries without a
    # 'description' (e.g. the version header written by init_learnings_db).
    entries = []
    try:
        with open(filepath, 'r') as f:
            for line in f:
                try:
                    entry = json.loads(line)
                    if 'description' in entry:
                        entries.append(entry)
                except:
                    continue
    except:
        pass
    return entries

def filter_relevant(entries, context, limit=5):
    # Rank by naive word-overlap between the context and each entry's
    # description/category; keep the top `limit` entries with score > 0.
    scored = []
    for e in entries:
        desc = e.get('description', '').lower()
        cat = e.get('category', '').lower()
        score = sum(1 for word in context.split() if word in desc or word in cat)
        if score > 0:
            scored.append((score, e))
    scored.sort(reverse=True, key=lambda x: x[0])
    return [e for _, e in scored[:limit]]

patterns = load_jsonl(f"{learnings_dir}/patterns.jsonl")
mistakes = load_jsonl(f"{learnings_dir}/mistakes.jsonl")
successes = load_jsonl(f"{learnings_dir}/successes.jsonl")

result = {
    "patterns": filter_relevant(patterns, context),
    "mistakes": filter_relevant(mistakes, context),
    "successes": filter_relevant(successes, context)
}

# NOTE(review): this hardcoded path must stay in sync with $output_file in
# the enclosing shell function, and assumes .loki/state/ already exists.
with open(".loki/state/relevant-learnings.json", 'w') as f:
    json.dump(result, f, indent=2)
LEARNINGS_SCRIPT

    log_info "Loaded relevant learnings to: $output_file"
}
|
|
2568
|
+
|
|
2569
|
+
extract_learnings_from_session() {
    # Mine the session's CONTINUITY.md for bullet points under the
    # "## Mistakes & Learnings" heading and append each one as a JSONL entry
    # to the cross-project mistakes database (~/.loki/learnings/mistakes.jsonl).
    local continuity_file=".loki/CONTINUITY.md"

    if [ ! -f "$continuity_file" ]; then
        return
    fi

    log_info "Extracting learnings from session..."

    # Parse CONTINUITY.md for the Mistakes & Learnings section.
    # FIX: the heredoc delimiter is now quoted so the shell performs NO
    # expansion inside the Python source — the previous unquoted form would
    # silently corrupt the script if it ever contained $, backticks, or
    # backslashes.
    python3 << 'EXTRACT_SCRIPT'
import re
import json
import os
from datetime import datetime, timezone

continuity_file = ".loki/CONTINUITY.md"
learnings_dir = os.path.expanduser("~/.loki/learnings")

if not os.path.exists(continuity_file):
    exit(0)

with open(continuity_file, 'r') as f:
    content = f.read()

# Grab everything under "## Mistakes & Learnings" up to the next H2 or EOF.
mistakes_match = re.search(r'## Mistakes & Learnings\n(.*?)(?=\n## |\Z)', content, re.DOTALL)
if mistakes_match:
    mistakes_text = mistakes_match.group(1)
    # Each "-" or "*" bullet becomes one learning entry.
    bullets = re.findall(r'[-*]\s+(.+)', mistakes_text)
    for bullet in bullets:
        entry = {
            "timestamp": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z"),
            "project": os.path.basename(os.getcwd()),
            "category": "session",
            "description": bullet.strip()
        }
        with open(f"{learnings_dir}/mistakes.jsonl", 'a') as f:
            f.write(json.dumps(entry) + "\n")
        print(f"Extracted: {bullet[:50]}...")

print("Learning extraction complete")
EXTRACT_SCRIPT
}
|
|
2615
|
+
|
|
2616
|
+
start_dashboard() {
    # Generate the static dashboard HTML and serve it (together with the
    # sibling queue/ and state/ JSON files) over a local Python HTTP server
    # bound to 127.0.0.1:$DASHBOARD_PORT. Stores the server subshell's PID in
    # DASHBOARD_PID; returns 1 if the server did not survive its first second.
    log_header "Starting Loki Dashboard"

    # Create dashboard directory
    mkdir -p .loki/dashboard

    # Generate HTML
    generate_dashboard

    # Kill any existing process on the dashboard port
    # NOTE(review): kill -9 as first resort; a TERM-first approach would be
    # gentler on whatever currently holds the port.
    if lsof -i :$DASHBOARD_PORT &>/dev/null; then
        log_step "Killing existing process on port $DASHBOARD_PORT..."
        lsof -ti :$DASHBOARD_PORT | xargs kill -9 2>/dev/null || true
        sleep 1
    fi

    # Start Python HTTP server from .loki/ root so it can serve queue/ and state/
    log_step "Starting dashboard server..."
    (
        cd .loki
        # Server output is prefixed and appended to logs/dashboard.log
        # (path is relative to .loki/ after the cd).
        # NOTE(review): assumes .loki/logs already exists — the redirect
        # fails silently in the background otherwise.
        python3 -m http.server $DASHBOARD_PORT --bind 127.0.0.1 2>&1 | while read line; do
            echo "[dashboard] $line" >> logs/dashboard.log
        done
    ) &
    DASHBOARD_PID=$!

    sleep 1

    # NOTE(review): DASHBOARD_PID is the wrapper subshell, not the python
    # process itself; killing it may leave the HTTP server running.
    if kill -0 $DASHBOARD_PID 2>/dev/null; then
        log_info "Dashboard started (PID: $DASHBOARD_PID)"
        log_info "Dashboard: ${CYAN}http://127.0.0.1:$DASHBOARD_PORT/dashboard/index.html${NC}"

        # Open in browser (macOS)
        if [[ "$OSTYPE" == "darwin"* ]]; then
            open "http://127.0.0.1:$DASHBOARD_PORT/dashboard/index.html" 2>/dev/null || true
        fi
        return 0
    else
        log_warn "Dashboard failed to start"
        DASHBOARD_PID=""
        return 1
    fi
}
|
|
2659
|
+
|
|
2660
|
+
stop_dashboard() {
    # Shut down the dashboard HTTP-server subshell, if one is running.
    [ -n "$DASHBOARD_PID" ] || return 0
    kill "$DASHBOARD_PID" 2>/dev/null || true
    wait "$DASHBOARD_PID" 2>/dev/null || true
}
|
|
2666
|
+
|
|
2667
|
+
#===============================================================================
|
|
2668
|
+
# Calculate Exponential Backoff
|
|
2669
|
+
#===============================================================================
|
|
2670
|
+
|
|
2671
|
+
calculate_wait() {
    # Compute the exponential-backoff delay for retry attempt $1:
    # BASE_WAIT * 2^retry, plus 0-29 seconds of random jitter, capped at
    # MAX_WAIT. Prints the delay in seconds on stdout.
    local attempt="$1"
    local delay=$(( BASE_WAIT * (2 ** attempt) + RANDOM % 30 ))

    if [ "$delay" -gt "$MAX_WAIT" ]; then
        delay=$MAX_WAIT
    fi

    echo "$delay"
}
|
|
2686
|
+
|
|
2687
|
+
#===============================================================================
|
|
2688
|
+
# Rate Limit Detection
|
|
2689
|
+
#===============================================================================
|
|
2690
|
+
|
|
2691
|
+
# Detect rate limit from log and calculate wait time until reset
|
|
2692
|
+
# Returns: seconds to wait, or 0 if no rate limit detected
|
|
2693
|
+
detect_rate_limit() {
    # Scan a log for a rate-limit notice of the form "resets 4am" / "resets
    # 10pm" and print the number of seconds to wait until that reset time
    # (plus a 2-minute safety buffer). Prints 0 when no notice is found.
    # $1 - path to the log file to scan
    local log_file="$1"

    # Use the most recent rate limit message in the log.
    local reset_time=$(grep -o "resets [0-9]\+[ap]m" "$log_file" 2>/dev/null | tail -1 | grep -o "[0-9]\+[ap]m")

    if [ -z "$reset_time" ]; then
        echo 0
        return
    fi

    # Parse the reset time into hour + meridiem.
    local hour=$(echo "$reset_time" | grep -o "[0-9]\+")
    local ampm=$(echo "$reset_time" | grep -o "[ap]m")

    # Convert to 24-hour format (12am -> 0, 12pm stays 12).
    if [ "$ampm" = "pm" ] && [ "$hour" -ne 12 ]; then
        hour=$((hour + 12))
    elif [ "$ampm" = "am" ] && [ "$hour" -eq 12 ]; then
        hour=0
    fi

    # Current wall-clock time. FIX: force base-10 with 10# — %H/%M/%S are
    # zero-padded, and bare "08"/"09" are parsed as invalid OCTAL constants
    # inside $(( )), which previously broke this function during those
    # hours/minutes/seconds.
    local current_hour=$((10#$(date +%H)))
    local current_min=$((10#$(date +%M)))
    local current_sec=$((10#$(date +%S)))

    # Seconds elapsed today vs. seconds at the reset hour.
    local current_secs=$((current_hour * 3600 + current_min * 60 + current_sec))
    local reset_secs=$((hour * 3600))

    local wait_secs=$((reset_secs - current_secs))

    # A reset time earlier than now means the limit resets tomorrow.
    if [ $wait_secs -le 0 ]; then
        wait_secs=$((wait_secs + 86400)) # Add 24 hours
    fi

    # Add 2 minute buffer to ensure limit is actually reset
    wait_secs=$((wait_secs + 120))

    echo $wait_secs
}
|
|
2736
|
+
|
|
2737
|
+
# Format seconds into human-readable time
|
|
2738
|
+
format_duration() {
    # Render a second count as "Xh Ym", or just "Ym" when under an hour.
    local total="$1"
    local h=$(( total / 3600 ))
    local m=$(( total % 3600 / 60 ))

    if [ "$h" -gt 0 ]; then
        printf '%sh %sm\n' "$h" "$m"
    else
        printf '%sm\n' "$m"
    fi
}
|
|
2749
|
+
|
|
2750
|
+
#===============================================================================
|
|
2751
|
+
# Check Completion
|
|
2752
|
+
#===============================================================================
|
|
2753
|
+
|
|
2754
|
+
is_completed() {
|
|
2755
|
+
# Check orchestrator state
|
|
2756
|
+
if [ -f ".loki/state/orchestrator.json" ]; then
|
|
2757
|
+
if command -v python3 &> /dev/null; then
|
|
2758
|
+
local phase=$(python3 -c "import json; print(json.load(open('.loki/state/orchestrator.json')).get('currentPhase', ''))" 2>/dev/null || echo "")
|
|
2759
|
+
# Accept various completion states
|
|
2760
|
+
if [ "$phase" = "COMPLETED" ] || [ "$phase" = "complete" ] || [ "$phase" = "finalized" ] || [ "$phase" = "growth-loop" ]; then
|
|
2761
|
+
return 0
|
|
2762
|
+
fi
|
|
2763
|
+
fi
|
|
2764
|
+
fi
|
|
2765
|
+
|
|
2766
|
+
# Check for completion marker
|
|
2767
|
+
if [ -f ".loki/COMPLETED" ]; then
|
|
2768
|
+
return 0
|
|
2769
|
+
fi
|
|
2770
|
+
|
|
2771
|
+
return 1
|
|
2772
|
+
}
|
|
2773
|
+
|
|
2774
|
+
# Check if completion promise is fulfilled in log output
|
|
2775
|
+
check_completion_promise() {
    # Success (0) when the log contains the standard completion phrase, or
    # the user-configured COMPLETION_PROMISE text (matched as a fixed string).
    # $1 - path to the log file to scan
    local log_file="$1"

    grep -q "COMPLETION PROMISE FULFILLED" "$log_file" 2>/dev/null && return 0

    if [ -n "$COMPLETION_PROMISE" ]; then
        grep -qF "$COMPLETION_PROMISE" "$log_file" 2>/dev/null && return 0
    fi

    return 1
}
|
|
2790
|
+
|
|
2791
|
+
# Check if max iterations reached
|
|
2792
|
+
check_max_iterations() {
    # Success (0) when the iteration budget is exhausted; logs a warning then.
    if [ "$ITERATION_COUNT" -lt "$MAX_ITERATIONS" ]; then
        return 1
    fi
    log_warn "Max iterations ($MAX_ITERATIONS) reached. Stopping."
    return 0
}
|
|
2799
|
+
|
|
2800
|
+
# Check if context clear was requested by agent
|
|
2801
|
+
check_context_clear_signal() {
    # Success (0) if an agent dropped the CONTEXT_CLEAR_REQUESTED signal file.
    # The signal is consumed (deleted) so it fires only once.
    [ -f ".loki/signals/CONTEXT_CLEAR_REQUESTED" ] || return 1
    log_info "Context clear signal detected from agent"
    rm -f ".loki/signals/CONTEXT_CLEAR_REQUESTED"
    return 0
}
|
|
2809
|
+
|
|
2810
|
+
# Load latest ledger content for context injection
|
|
2811
|
+
load_ledger_context() {
    # Print the first 100 lines of the newest orchestrator ledger, if any,
    # for injection into the resume prompt. Prints nothing when none exist.
    local latest_ledger
    # Filenames are wrapper-generated (LEDGER-*.md, no whitespace), so
    # picking the newest via `ls -t | head -1` is acceptable here.
    latest_ledger=$(ls -t .loki/memory/ledgers/LEDGER-*.md 2>/dev/null | head -1)

    if [ -n "$latest_ledger" ] && [ -f "$latest_ledger" ]; then
        # Stream directly instead of the former cat | head round-trip through
        # a variable (which also collapsed trailing blank lines).
        head -100 "$latest_ledger"
    fi
}
|
|
2822
|
+
|
|
2823
|
+
# Load recent handoffs for context
|
|
2824
|
+
load_handoff_context() {
    # Print the first 80 lines of a handoff written within the last 24 hours,
    # if one exists, for context injection. Prints nothing otherwise.
    local recent_handoff
    recent_handoff=$(find .loki/memory/handoffs -name "*.md" -mtime -1 2>/dev/null | head -1)

    if [ -n "$recent_handoff" ] && [ -f "$recent_handoff" ]; then
        # Stream directly instead of the former cat | head round-trip
        # through a variable.
        head -80 "$recent_handoff"
    fi
}
|
|
2835
|
+
|
|
2836
|
+
# Load relevant learnings
|
|
2837
|
+
load_learnings_context() {
    # Print the first 30 lines of up to five learnings files modified within
    # the last 7 days, each followed by a "---" separator.
    local learnings=""
    local learning

    # FIX: read paths line-by-line instead of word-splitting $(find ...),
    # so filenames containing spaces are handled correctly.
    while IFS= read -r learning; do
        learnings+="$(head -30 "$learning")\n---\n"
    done < <(find .loki/memory/learnings -name "*.md" -mtime -7 2>/dev/null | head -5)

    echo -e "$learnings"
}
|
|
2847
|
+
|
|
2848
|
+
#===============================================================================
|
|
2849
|
+
# Save/Load Wrapper State
|
|
2850
|
+
#===============================================================================
|
|
2851
|
+
|
|
2852
|
+
save_state() {
    # Persist wrapper-level retry/backoff state to .loki/autonomy-state.json
    # so a restarted wrapper can resume where it left off.
    # $1 - retry count, $2 - status string, $3 - last exit code
    local retry_count="$1"
    local status="$2"
    local exit_code="$3"

    {
        printf '{\n'
        printf '  "retryCount": %s,\n' "$retry_count"
        printf '  "status": "%s",\n' "$status"
        printf '  "lastExitCode": %s,\n' "$exit_code"
        printf '  "lastRun": "%s",\n' "$(date -u +%Y-%m-%dT%H:%M:%SZ)"
        printf '  "prdPath": "%s",\n' "${PRD_PATH:-}"
        printf '  "pid": %s,\n' "$$"
        printf '  "maxRetries": %s,\n' "$MAX_RETRIES"
        printf '  "baseWait": %s\n' "$BASE_WAIT"
        printf '}\n'
    } > ".loki/autonomy-state.json"
}
|
|
2870
|
+
|
|
2871
|
+
load_state() {
    # Restore RETRY_COUNT from .loki/autonomy-state.json. Defaults to 0 when
    # the state file is missing or python3 is unavailable.
    RETRY_COUNT=0
    if [ -f ".loki/autonomy-state.json" ] && command -v python3 &> /dev/null; then
        RETRY_COUNT=$(python3 -c "import json; print(json.load(open('.loki/autonomy-state.json')).get('retryCount', 0))" 2>/dev/null || echo "0")
    fi
}
|
|
2882
|
+
|
|
2883
|
+
#===============================================================================
|
|
2884
|
+
# Build Resume Prompt
|
|
2885
|
+
#===============================================================================
|
|
2886
|
+
|
|
2887
|
+
build_prompt() {
|
|
2888
|
+
local retry="$1"
|
|
2889
|
+
local prd="$2"
|
|
2890
|
+
local iteration="$3"
|
|
2891
|
+
|
|
2892
|
+
# Build SDLC phases configuration
|
|
2893
|
+
local phases=""
|
|
2894
|
+
[ "$PHASE_UNIT_TESTS" = "true" ] && phases="${phases}UNIT_TESTS,"
|
|
2895
|
+
[ "$PHASE_API_TESTS" = "true" ] && phases="${phases}API_TESTS,"
|
|
2896
|
+
[ "$PHASE_E2E_TESTS" = "true" ] && phases="${phases}E2E_TESTS,"
|
|
2897
|
+
[ "$PHASE_SECURITY" = "true" ] && phases="${phases}SECURITY,"
|
|
2898
|
+
[ "$PHASE_INTEGRATION" = "true" ] && phases="${phases}INTEGRATION,"
|
|
2899
|
+
[ "$PHASE_CODE_REVIEW" = "true" ] && phases="${phases}CODE_REVIEW,"
|
|
2900
|
+
[ "$PHASE_WEB_RESEARCH" = "true" ] && phases="${phases}WEB_RESEARCH,"
|
|
2901
|
+
[ "$PHASE_PERFORMANCE" = "true" ] && phases="${phases}PERFORMANCE,"
|
|
2902
|
+
[ "$PHASE_ACCESSIBILITY" = "true" ] && phases="${phases}ACCESSIBILITY,"
|
|
2903
|
+
[ "$PHASE_REGRESSION" = "true" ] && phases="${phases}REGRESSION,"
|
|
2904
|
+
[ "$PHASE_UAT" = "true" ] && phases="${phases}UAT,"
|
|
2905
|
+
phases="${phases%,}" # Remove trailing comma
|
|
2906
|
+
|
|
2907
|
+
# Ralph Wiggum Mode - Reason-Act-Reflect-VERIFY cycle with self-verification loop (Boris Cherny pattern)
|
|
2908
|
+
local rarv_instruction="RALPH WIGGUM MODE ACTIVE. Use Reason-Act-Reflect-VERIFY cycle: 1) REASON - READ .loki/CONTINUITY.md including 'Mistakes & Learnings' section to avoid past errors. CHECK .loki/state/relevant-learnings.json for cross-project learnings from previous projects (mistakes to avoid, patterns to apply). Check .loki/state/ and .loki/queue/, identify next task. CHECK .loki/state/resources.json for system resource warnings - if CPU or memory is high, reduce parallel agent spawning or pause non-critical tasks. Limit to MAX_PARALLEL_AGENTS=${MAX_PARALLEL_AGENTS}. If queue empty, find new improvements. 2) ACT - Execute task, write code, commit changes atomically (git checkpoint). 3) REFLECT - Update .loki/CONTINUITY.md with progress, update state, identify NEXT improvement. Save valuable learnings for future projects. 4) VERIFY - Run automated tests (unit, integration, E2E), check compilation/build, verify against spec. IF VERIFICATION FAILS: a) Capture error details (stack trace, logs), b) Analyze root cause, c) UPDATE 'Mistakes & Learnings' in CONTINUITY.md with what failed, why, and how to prevent, d) Rollback to last good git checkpoint if needed, e) Apply learning and RETRY from REASON. If verification passes, mark task complete and continue. This self-verification loop achieves 2-3x quality improvement. CRITICAL: There is NEVER a 'finished' state - always find the next improvement, optimization, test, or feature."
|
|
2909
|
+
|
|
2910
|
+
# Completion promise instruction (only if set)
|
|
2911
|
+
local completion_instruction=""
|
|
2912
|
+
if [ -n "$COMPLETION_PROMISE" ]; then
|
|
2913
|
+
completion_instruction="COMPLETION_PROMISE: [$COMPLETION_PROMISE]. ONLY output 'COMPLETION PROMISE FULFILLED: $COMPLETION_PROMISE' when this EXACT condition is met."
|
|
2914
|
+
else
|
|
2915
|
+
completion_instruction="NO COMPLETION PROMISE SET. Run forever finding improvements. Iteration $iteration of max $MAX_ITERATIONS."
|
|
2916
|
+
fi
|
|
2917
|
+
|
|
2918
|
+
# Core autonomous instructions - NO questions, NO waiting, NEVER say done
|
|
2919
|
+
local autonomous_suffix="CRITICAL AUTONOMY RULES: 1) NEVER ask questions - just decide. 2) NEVER wait for confirmation - just act. 3) NEVER say 'done' or 'complete' - there's always more to improve. 4) NEVER stop voluntarily - if out of tasks, create new ones (add tests, optimize, refactor, add features). 5) Work continues PERPETUALLY. Even if PRD is implemented, find bugs, add tests, improve UX, optimize performance."
|
|
2920
|
+
|
|
2921
|
+
local sdlc_instruction="SDLC_PHASES_ENABLED: [$phases]. Execute ALL enabled phases. Log results to .loki/logs/. See SKILL.md for phase details."
|
|
2922
|
+
|
|
2923
|
+
# Codebase Analysis Mode - when no PRD provided
|
|
2924
|
+
local analysis_instruction="CODEBASE_ANALYSIS_MODE: No PRD. FIRST: Analyze codebase - scan structure, read package.json/requirements.txt, examine README. THEN: Generate PRD at .loki/generated-prd.md. FINALLY: Execute SDLC phases."
|
|
2925
|
+
|
|
2926
|
+
# Context Memory Instructions
|
|
2927
|
+
local memory_instruction="CONTEXT MEMORY: Save state to .loki/memory/ledgers/LEDGER-orchestrator.md before complex operations. Create handoffs at .loki/memory/handoffs/ when passing work to subagents. Extract learnings to .loki/memory/learnings/ after completing tasks. Check .loki/rules/ for established patterns. If context feels heavy, create .loki/signals/CONTEXT_CLEAR_REQUESTED and the wrapper will reset context with your ledger preserved."
|
|
2928
|
+
|
|
2929
|
+
# Proactive Compaction Reminder (every N iterations)
|
|
2930
|
+
local compaction_reminder=""
|
|
2931
|
+
if [ $((iteration % COMPACTION_INTERVAL)) -eq 0 ] && [ $iteration -gt 0 ]; then
|
|
2932
|
+
compaction_reminder="PROACTIVE_CONTEXT_CHECK: You are at iteration $iteration. Review context size - if conversation history is long, consolidate to CONTINUITY.md and consider creating .loki/signals/CONTEXT_CLEAR_REQUESTED to reset context while preserving state."
|
|
2933
|
+
fi
|
|
2934
|
+
|
|
2935
|
+
# Load existing context if resuming
|
|
2936
|
+
local context_injection=""
|
|
2937
|
+
if [ $retry -gt 0 ]; then
|
|
2938
|
+
local ledger=$(load_ledger_context)
|
|
2939
|
+
local handoff=$(load_handoff_context)
|
|
2940
|
+
|
|
2941
|
+
if [ -n "$ledger" ]; then
|
|
2942
|
+
context_injection="PREVIOUS_LEDGER_STATE: $ledger"
|
|
2943
|
+
fi
|
|
2944
|
+
if [ -n "$handoff" ]; then
|
|
2945
|
+
context_injection="$context_injection RECENT_HANDOFF: $handoff"
|
|
2946
|
+
fi
|
|
2947
|
+
fi
|
|
2948
|
+
|
|
2949
|
+
if [ $retry -eq 0 ]; then
|
|
2950
|
+
if [ -n "$prd" ]; then
|
|
2951
|
+
echo "Loki Mode with PRD at $prd. $rarv_instruction $memory_instruction $compaction_reminder $completion_instruction $sdlc_instruction $autonomous_suffix"
|
|
2952
|
+
else
|
|
2953
|
+
echo "Loki Mode. $analysis_instruction $rarv_instruction $memory_instruction $compaction_reminder $completion_instruction $sdlc_instruction $autonomous_suffix"
|
|
2954
|
+
fi
|
|
2955
|
+
else
|
|
2956
|
+
if [ -n "$prd" ]; then
|
|
2957
|
+
echo "Loki Mode - Resume iteration #$iteration (retry #$retry). PRD: $prd. $context_injection $rarv_instruction $memory_instruction $compaction_reminder $completion_instruction $sdlc_instruction $autonomous_suffix"
|
|
2958
|
+
else
|
|
2959
|
+
echo "Loki Mode - Resume iteration #$iteration (retry #$retry). $context_injection Use .loki/generated-prd.md if exists. $rarv_instruction $memory_instruction $compaction_reminder $completion_instruction $sdlc_instruction $autonomous_suffix"
|
|
2960
|
+
fi
|
|
2961
|
+
fi
|
|
2962
|
+
}
|
|
2963
|
+
|
|
2964
|
+
#===============================================================================
|
|
2965
|
+
# Main Autonomous Loop
|
|
2966
|
+
#===============================================================================
|
|
2967
|
+
|
|
2968
|
+
#######################################
# Main autonomous loop: repeatedly invoke Claude Code with a generated prompt
# until the completion promise is fulfilled, MAX_RETRIES is exhausted, or
# MAX_ITERATIONS is reached.
# Globals (read): MAX_RETRIES, MAX_ITERATIONS, COMPLETION_PROMISE, BASE_WAIT,
#   MAX_WAIT, AUTONOMY_MODE, PROMPT_REPETITION, CONFIDENCE_ROUTING,
#   PERPETUAL_MODE, RETRY_COUNT, ITERATION_COUNT, CYAN/YELLOW/NC colors.
# Arguments: $1 - path to a PRD file (optional; auto-detected if empty).
# Outputs:   live formatted Claude output to stdout; raw stream appended to
#            .loki/logs/autonomy-YYYYMMDD.log.
# Returns:   0 on completion promise / max iterations, 1 on retry exhaustion.
#######################################
run_autonomous() {
    local prd_path="$1"

    log_header "Starting Autonomous Execution"

    # Auto-detect PRD if not provided
    if [ -z "$prd_path" ]; then
        log_step "No PRD provided, searching for existing PRD files..."
        local found_prd=""

        # Search common PRD file patterns (first match wins, in this order)
        for pattern in "PRD.md" "prd.md" "REQUIREMENTS.md" "requirements.md" "SPEC.md" "spec.md" \
            "docs/PRD.md" "docs/prd.md" "docs/REQUIREMENTS.md" "docs/requirements.md" \
            "docs/SPEC.md" "docs/spec.md" ".github/PRD.md" "PROJECT.md" "project.md"; do
            if [ -f "$pattern" ]; then
                found_prd="$pattern"
                break
            fi
        done

        if [ -n "$found_prd" ]; then
            log_info "Found existing PRD: $found_prd"
            prd_path="$found_prd"
        elif [ -f ".loki/generated-prd.md" ]; then
            # Fall back to a PRD generated by a previous analysis run
            log_info "Using previously generated PRD: .loki/generated-prd.md"
            prd_path=".loki/generated-prd.md"
        else
            # No PRD anywhere: build_prompt switches to codebase-analysis mode
            log_info "No PRD found - will analyze codebase and generate one"
        fi
    fi

    log_info "PRD: ${prd_path:-Codebase Analysis Mode}"
    log_info "Max retries: $MAX_RETRIES"
    log_info "Max iterations: $MAX_ITERATIONS"
    log_info "Completion promise: $COMPLETION_PROMISE"
    log_info "Base wait: ${BASE_WAIT}s"
    log_info "Max wait: ${MAX_WAIT}s"
    log_info "Autonomy mode: $AUTONOMY_MODE"
    log_info "Prompt repetition (Haiku): $PROMPT_REPETITION"
    log_info "Confidence routing: $CONFIDENCE_ROUTING"
    echo ""

    # Resume from persisted state (sets RETRY_COUNT / ITERATION_COUNT)
    load_state
    local retry=$RETRY_COUNT

    # Check max iterations before starting
    if check_max_iterations; then
        log_error "Max iterations already reached. Reset with: rm .loki/autonomy-state.json"
        return 1
    fi

    while [ $retry -lt $MAX_RETRIES ]; do
        # Increment iteration count
        # NOTE(review): ((var++)) returns the PRE-increment value; when the
        # count is 0 the expression's status is 1, which would abort the
        # script if `set -e` is active here — confirm, or prefer
        # ITERATION_COUNT=$((ITERATION_COUNT + 1)).
        ((ITERATION_COUNT++))

        # Check max iterations
        if check_max_iterations; then
            save_state $retry "max_iterations_reached" 0
            return 0
        fi

        # NOTE(review): `local x=$(cmd)` masks cmd's exit status; split the
        # declaration from the assignment if build_prompt can fail.
        local prompt=$(build_prompt $retry "$prd_path" $ITERATION_COUNT)

        echo ""
        log_header "Attempt $((retry + 1)) of $MAX_RETRIES"
        log_info "Prompt: $prompt"
        echo ""

        save_state $retry "running" 0

        # Run Claude Code with live output
        local start_time=$(date +%s)
        local log_file=".loki/logs/autonomy-$(date +%Y%m%d).log"

        echo ""
        echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
        echo -e "${CYAN} CLAUDE CODE OUTPUT (live)${NC}"
        echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
        echo ""

        # Log start time
        echo "=== Session started at $(date) ===" >> "$log_file"
        echo "=== Prompt: $prompt ===" >> "$log_file"

        # Temporarily disable errexit: the pipeline's failure is handled
        # explicitly via PIPESTATUS below.
        set +e
        # Run Claude with stream-json for real-time output
        # Parse JSON stream, display formatted output, and track agents
        # (the inline Python mirrors the stream to the terminal while writing
        # agent/task state files consumed by the dashboard).
        claude --dangerously-skip-permissions -p "$prompt" \
            --output-format stream-json --verbose 2>&1 | \
            tee -a "$log_file" | \
            python3 -u -c '
import sys
import json
import os
from datetime import datetime, timezone

# ANSI colors
CYAN = "\033[0;36m"
GREEN = "\033[0;32m"
YELLOW = "\033[1;33m"
MAGENTA = "\033[0;35m"
DIM = "\033[2m"
NC = "\033[0m"

# Agent tracking
AGENTS_FILE = ".loki/state/agents.json"
QUEUE_IN_PROGRESS = ".loki/queue/in-progress.json"
active_agents = {} # tool_id -> agent_info
orchestrator_id = "orchestrator-main"
session_start = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")

def init_orchestrator():
    """Initialize the main orchestrator agent (always visible)."""
    active_agents[orchestrator_id] = {
        "agent_id": orchestrator_id,
        "tool_id": orchestrator_id,
        "agent_type": "orchestrator",
        "model": "sonnet",
        "current_task": "Initializing...",
        "status": "active",
        "spawned_at": session_start,
        "tasks_completed": [],
        "tool_count": 0
    }
    save_agents()

def update_orchestrator_task(tool_name, description=""):
    """Update orchestrator current task based on tool usage."""
    if orchestrator_id in active_agents:
        active_agents[orchestrator_id]["tool_count"] = active_agents[orchestrator_id].get("tool_count", 0) + 1
        if description:
            active_agents[orchestrator_id]["current_task"] = f"{tool_name}: {description[:80]}"
        else:
            active_agents[orchestrator_id]["current_task"] = f"Using {tool_name}..."
        save_agents()

def load_agents():
    """Load existing agents from file."""
    try:
        if os.path.exists(AGENTS_FILE):
            with open(AGENTS_FILE, "r") as f:
                data = json.load(f)
                return {a.get("tool_id", a.get("agent_id")): a for a in data if isinstance(a, dict)}
    except:
        pass
    return {}

def save_agents():
    """Save agents to file for dashboard."""
    try:
        os.makedirs(os.path.dirname(AGENTS_FILE), exist_ok=True)
        agents_list = list(active_agents.values())
        with open(AGENTS_FILE, "w") as f:
            json.dump(agents_list, f, indent=2)
    except Exception as e:
        print(f"{YELLOW}[Agent save error: {e}]{NC}", file=sys.stderr)

def save_in_progress(tasks):
    """Save in-progress tasks to queue file."""
    try:
        os.makedirs(os.path.dirname(QUEUE_IN_PROGRESS), exist_ok=True)
        with open(QUEUE_IN_PROGRESS, "w") as f:
            json.dump(tasks, f, indent=2)
    except:
        pass

def process_stream():
    global active_agents
    active_agents = load_agents()

    # Always show the main orchestrator
    init_orchestrator()
    print(f"{MAGENTA}[Orchestrator Active]{NC} Main agent started", flush=True)

    for line in sys.stdin:
        line = line.strip()
        if not line:
            continue
        try:
            data = json.loads(line)
            msg_type = data.get("type", "")

            if msg_type == "assistant":
                # Extract and print assistant text
                message = data.get("message", {})
                content = message.get("content", [])
                for item in content:
                    if item.get("type") == "text":
                        text = item.get("text", "")
                        if text:
                            print(text, end="", flush=True)
                    elif item.get("type") == "tool_use":
                        tool = item.get("name", "unknown")
                        tool_id = item.get("id", "")
                        tool_input = item.get("input", {})

                        # Extract description based on tool type
                        tool_desc = ""
                        if tool == "Read":
                            tool_desc = tool_input.get("file_path", "")
                        elif tool == "Edit" or tool == "Write":
                            tool_desc = tool_input.get("file_path", "")
                        elif tool == "Bash":
                            tool_desc = tool_input.get("description", tool_input.get("command", "")[:60])
                        elif tool == "Grep":
                            tool_desc = f"pattern: {tool_input.get('pattern', '')}"
                        elif tool == "Glob":
                            tool_desc = tool_input.get("pattern", "")

                        # Update orchestrator with current tool activity
                        update_orchestrator_task(tool, tool_desc)

                        # Track Task tool calls (agent spawning)
                        if tool == "Task":
                            agent_type = tool_input.get("subagent_type", "general-purpose")
                            description = tool_input.get("description", "")
                            model = tool_input.get("model", "sonnet")

                            agent_info = {
                                "agent_id": f"agent-{tool_id[:8]}",
                                "tool_id": tool_id,
                                "agent_type": agent_type,
                                "model": model,
                                "current_task": description,
                                "status": "active",
                                "spawned_at": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z"),
                                "tasks_completed": []
                            }
                            active_agents[tool_id] = agent_info
                            save_agents()
                            print(f"\n{MAGENTA}[Agent Spawned: {agent_type}]{NC} {description}", flush=True)

                        # Track TodoWrite for task updates
                        elif tool == "TodoWrite":
                            todos = tool_input.get("todos", [])
                            in_progress = [t for t in todos if t.get("status") == "in_progress"]
                            save_in_progress([{"id": f"todo-{i}", "type": "todo", "payload": {"action": t.get("content", "")}} for i, t in enumerate(in_progress)])
                            print(f"\n{CYAN}[Tool: {tool}]{NC} {len(todos)} items", flush=True)

                        else:
                            print(f"\n{CYAN}[Tool: {tool}]{NC}", flush=True)

            elif msg_type == "user":
                # Tool results - check for agent completion
                content = data.get("message", {}).get("content", [])
                for item in content:
                    if item.get("type") == "tool_result":
                        tool_id = item.get("tool_use_id", "")

                        # Mark agent as completed if it was a Task
                        if tool_id in active_agents:
                            active_agents[tool_id]["status"] = "completed"
                            active_agents[tool_id]["completed_at"] = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
                            save_agents()
                            print(f"{DIM}[Agent Complete]{NC} ", end="", flush=True)
                        else:
                            print(f"{DIM}[Result]{NC} ", end="", flush=True)

            elif msg_type == "result":
                # Session complete - mark all agents as completed
                completed_at = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
                for agent_id in active_agents:
                    if active_agents[agent_id].get("status") == "active":
                        active_agents[agent_id]["status"] = "completed"
                        active_agents[agent_id]["completed_at"] = completed_at
                        active_agents[agent_id]["current_task"] = "Session complete"

                # Add session stats to orchestrator
                if orchestrator_id in active_agents:
                    tool_count = active_agents[orchestrator_id].get("tool_count", 0)
                    active_agents[orchestrator_id]["tasks_completed"].append(f"{tool_count} tools used")

                save_agents()
                print(f"\n{GREEN}[Session complete]{NC}", flush=True)
                is_error = data.get("is_error", False)
                sys.exit(1 if is_error else 0)

        except json.JSONDecodeError:
            # Not JSON, print as-is
            print(line, flush=True)
        except Exception as e:
            print(f"{YELLOW}[Parse error: {e}]{NC}", file=sys.stderr)

if __name__ == "__main__":
    try:
        process_stream()
    except KeyboardInterrupt:
        sys.exit(130)
    except BrokenPipeError:
        sys.exit(0)
'
        # PIPESTATUS[0] is the claude process's own exit code, not the
        # tee/python stages' — that is the status we retry on.
        local exit_code=${PIPESTATUS[0]}
        set -e

        echo ""
        echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
        echo ""

        # Log end time
        echo "=== Session ended at $(date) with exit code $exit_code ===" >> "$log_file"

        local end_time=$(date +%s)
        local duration=$((end_time - start_time))

        log_info "Claude exited with code $exit_code after ${duration}s"
        save_state $retry "exited" $exit_code

        # Check for success - ONLY stop on explicit completion promise
        # There's never a "complete" product - always improvements, bugs, features
        if [ $exit_code -eq 0 ]; then
            # Perpetual mode: NEVER stop, always continue
            if [ "$PERPETUAL_MODE" = "true" ]; then
                log_info "Perpetual mode: Ignoring exit, continuing immediately..."
                # NOTE(review): same ((var++)) pre-increment/errexit caveat as
                # ITERATION_COUNT above if retry can be 0 here.
                ((retry++))
                continue # Immediately start next iteration, no wait
            fi

            # Only stop if EXPLICIT completion promise text was output
            if [ -n "$COMPLETION_PROMISE" ] && check_completion_promise "$log_file"; then
                echo ""
                log_header "COMPLETION PROMISE FULFILLED: $COMPLETION_PROMISE"
                log_info "Explicit completion promise detected in output."
                notify_all_complete
                save_state $retry "completion_promise_fulfilled" 0
                return 0
            fi

            # Warn if Claude says it's "done" but no explicit promise
            if is_completed; then
                log_warn "Claude claims completion, but no explicit promise fulfilled."
                log_warn "Projects are never truly complete - there are always improvements!"
            fi

            # SUCCESS exit - continue IMMEDIATELY to next iteration (no wait!)
            log_info "Iteration complete. Continuing to next iteration..."
            ((retry++))
            continue # Immediately start next iteration, no exponential backoff
        fi

        # Only apply retry logic for ERRORS (non-zero exit code)
        # Handle retry - check for rate limit first
        local rate_limit_wait=$(detect_rate_limit "$log_file")
        local wait_time

        if [ $rate_limit_wait -gt 0 ]; then
            wait_time=$rate_limit_wait
            local human_time=$(format_duration $wait_time)
            log_warn "Rate limit detected! Waiting until reset (~$human_time)..."
            # BSD date (-v) first, then GNU date (-d), then a plain fallback
            log_info "Rate limit resets at approximately $(date -v+${wait_time}S '+%I:%M %p' 2>/dev/null || date -d "+${wait_time} seconds" '+%I:%M %p' 2>/dev/null || echo 'soon')"
            notify_rate_limit "$wait_time"
        else
            # Exponential backoff based on retry number
            wait_time=$(calculate_wait $retry)
            log_warn "Will retry in ${wait_time}s..."
        fi

        log_info "Press Ctrl+C to cancel"

        # Countdown with progress
        local remaining=$wait_time
        local interval=10
        # Use longer interval for long waits
        if [ $wait_time -gt 1800 ]; then
            interval=60
        fi

        while [ $remaining -gt 0 ]; do
            local human_remaining=$(format_duration $remaining)
            printf "\r${YELLOW}Resuming in ${human_remaining}...${NC} "
            sleep $interval
            remaining=$((remaining - interval))
        done
        echo ""

        ((retry++))
    done

    log_error "Max retries ($MAX_RETRIES) exceeded"
    save_state $retry "failed" 1
    return 1
}
|
|
3348
|
+
|
|
3349
|
+
#===============================================================================
|
|
3350
|
+
# Human Intervention Mechanism (Auto-Claude pattern)
|
|
3351
|
+
#===============================================================================
|
|
3352
|
+
|
|
3353
|
+
# Track interrupt state for Ctrl+C pause/exit behavior
# INTERRUPT_COUNT      - number of Ctrl+C presses seen in the current window
# INTERRUPT_LAST_TIME  - epoch seconds of the most recent interrupt
# PAUSED               - "true" while handle_pause is waiting for a resume
INTERRUPT_COUNT=0
INTERRUPT_LAST_TIME=0
PAUSED=false
|
|
3357
|
+
|
|
3358
|
+
# Check for human intervention signals
|
|
3359
|
+
#######################################
# Check for human intervention signal files under .loki/.
# Signals, checked in this order:
#   PAUSE          - pause execution (file is consumed; handle_pause waits)
#   HUMAN_INPUT.md - operator instructions to inject into the next prompt
#   STOP           - stop execution entirely (file is consumed)
# Globals:   TARGET_DIR (read); LOKI_HUMAN_INPUT (exported when input found)
# Returns:   0 normally (including after consuming human input),
#            1 after a pause was handled, 2 on STOP request.
#######################################
check_human_intervention() {
    local loki_dir="${TARGET_DIR:-.}/.loki"

    # Check for PAUSE file
    if [ -f "$loki_dir/PAUSE" ]; then
        log_warn "PAUSE file detected - pausing execution"
        notify_intervention_needed "Execution paused via PAUSE file"
        rm -f "$loki_dir/PAUSE"
        handle_pause
        return 1
    fi

    # Check for HUMAN_INPUT.md
    if [ -f "$loki_dir/HUMAN_INPUT.md" ]; then
        # Declare and assign separately so cat's exit status isn't masked.
        local human_input
        human_input=$(cat "$loki_dir/HUMAN_INPUT.md")
        if [ -n "$human_input" ]; then
            log_info "Human input detected:"
            echo "$human_input"
            echo ""
            # Move to processed. Ensure the logs dir exists first: otherwise
            # mv fails and, under `set -e`, would abort the whole script.
            mkdir -p "$loki_dir/logs"
            mv "$loki_dir/HUMAN_INPUT.md" "$loki_dir/logs/human-input-$(date +%Y%m%d-%H%M%S).md"
            # Inject into next prompt
            export LOKI_HUMAN_INPUT="$human_input"
            return 0
        fi
    fi

    # Check for STOP file (immediate stop)
    if [ -f "$loki_dir/STOP" ]; then
        log_warn "STOP file detected - stopping execution"
        rm -f "$loki_dir/STOP"
        return 2
    fi

    return 0
}
|
|
3395
|
+
|
|
3396
|
+
# Handle pause state - wait for resume
|
|
3397
|
+
#######################################
# Handle pause state - block until the operator resumes or stops.
# Resume triggers: any key press, removal of .loki/PAUSE (when one existed
# at pause time), or .loki/STOP (which also makes this return non-zero).
# Globals:   PAUSED (written), TARGET_DIR (read)
# Outputs:   writes .loki/PAUSED.md with instructions while paused
# Returns:   0 on resume, 1 on STOP request.
#######################################
handle_pause() {
    PAUSED=true
    local loki_dir="${TARGET_DIR:-.}/.loki"

    log_header "Execution Paused"
    echo ""
    log_info "To resume: Remove .loki/PAUSE or press Enter"
    log_info "To add instructions: echo 'your instructions' > .loki/HUMAN_INPUT.md"
    log_info "To stop completely: touch .loki/STOP"
    echo ""

    # Create resume instructions file
    cat > "$loki_dir/PAUSED.md" << 'EOF'
# Loki Mode - Paused

Execution is currently paused. Options:

1. **Resume**: Press Enter in terminal or `rm .loki/PAUSE`
2. **Add Instructions**: `echo "Focus on fixing the login bug" > .loki/HUMAN_INPUT.md`
3. **Stop**: `touch .loki/STOP`

Current state is saved. You can inspect:
- `.loki/CONTINUITY.md` - Progress and context
- `.loki/STATUS.txt` - Current status
- `.loki/logs/` - Session logs
EOF

    # Remember whether a PAUSE file exists right now. Some callers (e.g. the
    # Ctrl+C handler) create it before calling us; others consume it first.
    # Only when one is present can "rm .loki/PAUSE" meaningfully resume.
    local pause_file_present=false
    [ -f "$loki_dir/PAUSE" ] && pause_file_present=true

    # Wait for resume signal
    while [ "$PAUSED" = "true" ]; do
        # Check for resume conditions
        if [ -f "$loki_dir/STOP" ]; then
            rm -f "$loki_dir/STOP" "$loki_dir/PAUSED.md"
            PAUSED=false
            return 1
        fi

        # BUG FIX: honor the advertised "rm .loki/PAUSE" resume path.
        # Previously only a key press (or STOP) ended the wait loop.
        if [ "$pause_file_present" = "true" ] && [ ! -f "$loki_dir/PAUSE" ]; then
            PAUSED=false
            break
        fi

        # Check for any key press (non-blocking)
        if read -t 1 -n 1 2>/dev/null; then
            PAUSED=false
            break
        fi

        sleep 1
    done

    # Clean up marker files on resume. Removing a leftover PAUSE prevents
    # check_human_intervention from immediately re-pausing on the next cycle.
    rm -f "$loki_dir/PAUSED.md" "$loki_dir/PAUSE"
    log_info "Resuming execution..."
    PAUSED=false
    return 0
}
|
|
3447
|
+
|
|
3448
|
+
#===============================================================================
|
|
3449
|
+
# Cleanup Handler (with Ctrl+C pause support)
|
|
3450
|
+
#===============================================================================
|
|
3451
|
+
|
|
3452
|
+
#######################################
# SIGINT/SIGTERM handler implementing two-stage Ctrl+C behavior:
# a second interrupt within two seconds exits immediately (status 130);
# a single interrupt pauses execution and offers the operator options.
# Globals:   INTERRUPT_COUNT, INTERRUPT_LAST_TIME (read/written),
#            RETRY_COUNT, TARGET_DIR (read)
#######################################
cleanup() {
    local now
    now=$(date +%s)
    local since_last=$((now - INTERRUPT_LAST_TIME))

    # Double Ctrl+C inside the two-second window: save state and bail out.
    if [[ "$since_last" -lt 2 && "$INTERRUPT_COUNT" -gt 0 ]]; then
        echo ""
        log_warn "Double interrupt - stopping immediately"
        stop_dashboard
        stop_status_monitor
        save_state ${RETRY_COUNT:-0} "interrupted" 130
        log_info "State saved. Run again to resume."
        exit 130
    fi

    # Single Ctrl+C: record it and drop into the pause flow instead.
    INTERRUPT_COUNT=$((INTERRUPT_COUNT + 1))
    INTERRUPT_LAST_TIME=$now

    echo ""
    log_warn "Interrupt received - pausing..."
    log_info "Press Ctrl+C again within 2 seconds to exit"
    log_info "Or wait to add instructions..."
    echo ""

    # Drop a PAUSE marker and hand control to the interactive pause loop.
    touch "${TARGET_DIR:-.}/.loki/PAUSE"
    handle_pause

    # Back from the pause: start a fresh interrupt window.
    INTERRUPT_COUNT=0
}
|
|
3484
|
+
|
|
3485
|
+
#===============================================================================
|
|
3486
|
+
# Main Entry Point
|
|
3487
|
+
#===============================================================================
|
|
3488
|
+
|
|
3489
|
+
#######################################
# Main entry point: banner, argument parsing, prerequisite checks,
# service startup (dashboard/monitors), then the autonomous loop in either
# standard (single session) or parallel (git worktree) mode.
# Arguments: "$@" - optional --parallel / --help flags and a PRD path.
# Globals (read): PROJECT_DIR, PARALLEL_MODE, SKIP_PREREQS, GITHUB_IMPORT,
#   ENABLE_DASHBOARD, STAGED_AUTONOMY, MAX_WORKTREES, MAX_PARALLEL_SESSIONS,
#   color variables. Globals (written): PRD_PATH, PARALLEL_MODE.
# Exits with the autonomous run's result code.
#######################################
main() {
    # Route Ctrl+C / termination through the pause-aware handler
    trap cleanup INT TERM

    echo ""
    echo -e "${BOLD}${BLUE}"
    echo " ██╗ ██████╗ ██╗ ██╗██╗ ███╗ ███╗ ██████╗ ██████╗ ███████╗"
    echo " ██║ ██╔═══██╗██║ ██╔╝██║ ████╗ ████║██╔═══██╗██╔══██╗██╔════╝"
    echo " ██║ ██║ ██║█████╔╝ ██║ ██╔████╔██║██║ ██║██║ ██║█████╗ "
    echo " ██║ ██║ ██║██╔═██╗ ██║ ██║╚██╔╝██║██║ ██║██║ ██║██╔══╝ "
    echo " ███████╗╚██████╔╝██║ ██╗██║ ██║ ╚═╝ ██║╚██████╔╝██████╔╝███████╗"
    echo " ╚══════╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚══════╝"
    echo -e "${NC}"
    echo -e " ${CYAN}Autonomous Multi-Agent Startup System${NC}"
    echo -e " ${CYAN}Version: $(cat "$PROJECT_DIR/VERSION" 2>/dev/null || echo "4.x.x")${NC}"
    echo ""

    # Parse arguments: flags may appear in any order; the first non-flag
    # argument is taken as the PRD path, later ones are ignored.
    PRD_PATH=""
    for arg in "$@"; do
        case "$arg" in
            --parallel)
                PARALLEL_MODE=true
                ;;
            --help|-h)
                echo "Usage: ./autonomy/run.sh [OPTIONS] [PRD_PATH]"
                echo ""
                echo "Options:"
                echo " --parallel Enable git worktree-based parallel workflows"
                echo " --help, -h Show this help message"
                echo ""
                echo "Environment variables: See header comments in this script"
                exit 0
                ;;
            *)
                if [ -z "$PRD_PATH" ]; then
                    PRD_PATH="$arg"
                fi
                ;;
        esac
    done

    # Validate PRD if provided
    if [ -n "$PRD_PATH" ] && [ ! -f "$PRD_PATH" ]; then
        log_error "PRD file not found: $PRD_PATH"
        exit 1
    fi

    # Show parallel mode status
    if [ "$PARALLEL_MODE" = "true" ]; then
        log_info "Parallel mode enabled (git worktrees)"
    fi

    # Check prerequisites (unless skipped)
    if [ "$SKIP_PREREQS" != "true" ]; then
        if ! check_prerequisites; then
            exit 1
        fi
    else
        log_warn "Skipping prerequisite checks (LOKI_SKIP_PREREQS=true)"
    fi

    # Check skill installation
    if ! check_skill_installed; then
        exit 1
    fi

    # Initialize .loki directory
    init_loki_dir

    # Import GitHub issues if enabled (v4.1.0)
    if [ "$GITHUB_IMPORT" = "true" ]; then
        import_github_issues
    fi

    # Start web dashboard (if enabled)
    if [ "$ENABLE_DASHBOARD" = "true" ]; then
        start_dashboard
    else
        log_info "Dashboard disabled (LOKI_DASHBOARD=false)"
    fi

    # Start status monitor (background updates to .loki/STATUS.txt)
    start_status_monitor

    # Start resource monitor (background CPU/memory checks)
    start_resource_monitor

    # Initialize cross-project learnings database
    init_learnings_db

    # Load relevant learnings for this project context
    # NOTE(review): `cat file | head -100` is a useless use of cat; could be
    # `head -100 "$PRD_PATH"` (behavior unchanged).
    if [ -n "$PRD_PATH" ] && [ -f "$PRD_PATH" ]; then
        get_relevant_learnings "$(cat "$PRD_PATH" | head -100)"
    else
        get_relevant_learnings "general development"
    fi

    # Log session start for audit
    audit_log "SESSION_START" "prd=$PRD_PATH,dashboard=$ENABLE_DASHBOARD,staged_autonomy=$STAGED_AUTONOMY,parallel=$PARALLEL_MODE"

    # Run in appropriate mode
    local result=0
    if [ "$PARALLEL_MODE" = "true" ]; then
        # Parallel mode: orchestrate multiple worktrees
        log_header "Running in Parallel Mode"
        log_info "Max worktrees: $MAX_WORKTREES"
        log_info "Max parallel sessions: $MAX_PARALLEL_SESSIONS"

        # Run main session + orchestrator, both as background jobs
        (
            # Start main development session
            run_autonomous "$PRD_PATH"
        ) &
        local main_pid=$!

        # Run parallel orchestrator
        run_parallel_orchestrator &
        local orchestrator_pid=$!

        # Wait for main session (orchestrator continues watching);
        # capture its status without tripping `set -e`
        wait $main_pid || result=$?

        # Signal orchestrator to stop (best-effort; it may already be gone)
        kill $orchestrator_pid 2>/dev/null || true
        wait $orchestrator_pid 2>/dev/null || true

        # Cleanup parallel streams
        cleanup_parallel_streams
    else
        # Standard mode: single session
        run_autonomous "$PRD_PATH" || result=$?
    fi

    # Extract and save learnings from this session
    extract_learnings_from_session

    # Log session end for audit
    audit_log "SESSION_END" "result=$result,prd=$PRD_PATH"

    # Cleanup
    stop_dashboard
    stop_status_monitor

    exit $result
}
|
|
3634
|
+
|
|
3635
|
+
# Run main, forwarding all CLI arguments
main "$@"
|