loki-mode 5.58.0 → 5.58.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/SKILL.md CHANGED
@@ -3,7 +3,7 @@ name: loki-mode
3
3
  description: Multi-agent autonomous startup system. Triggers on "Loki Mode". Takes PRD to deployed product with minimal human intervention. Requires --dangerously-skip-permissions flag.
4
4
  ---
5
5
 
6
- # Loki Mode v5.58.0
6
+ # Loki Mode v5.58.2
7
7
 
8
8
  **You are an autonomous agent. You make decisions. You do not ask questions. You do not stop.**
9
9
 
@@ -263,4 +263,4 @@ The following features are documented in skill modules but not yet fully automat
263
263
  | Quality gates 3-reviewer system | Implemented (v5.35.0) | 5 specialist reviewers in `skills/quality-gates.md`; execution in run.sh |
264
264
  | Benchmarks (HumanEval, SWE-bench) | Infrastructure only | Runner scripts and datasets exist in `benchmarks/`; no published results |
265
265
 
266
- **v5.58.0 | [Autonomi](https://www.autonomi.dev/) flagship product | ~260 lines core**
266
+ **v5.58.2 | [Autonomi](https://www.autonomi.dev/) flagship product | ~260 lines core**
package/VERSION CHANGED
@@ -1 +1 @@
1
- 5.58.0
1
+ 5.58.2
@@ -144,7 +144,8 @@ _detect_port() {
144
144
  compose_file="${TARGET_DIR:-.}/compose.yml"
145
145
  fi
146
146
  local port
147
- port=$(grep -E '^\s*-\s*"?[0-9]+:[0-9]+"?' "$compose_file" 2>/dev/null | head -1 | grep -oE '[0-9]+:[0-9]+' | tail -1 | cut -d: -f1)
147
+ # Handle both simple (HOST:CONTAINER) and IP-bound (IP:HOST:CONTAINER) port formats
148
+ port=$(grep -E '^\s*-\s*"?[0-9]' "$compose_file" 2>/dev/null | head -1 | sed 's/.*- *"*//;s/".*//;' | awk -F: '{print $(NF-1)}')
148
149
  _APP_RUNNER_PORT="${port:-8080}"
149
150
  ;;
150
151
  *docker\ build*)
@@ -41,6 +41,10 @@ COUNCIL_ENABLED=${LOKI_COUNCIL_ENABLED:-true}
41
41
  COUNCIL_SIZE=${LOKI_COUNCIL_SIZE:-3}
42
42
  COUNCIL_THRESHOLD=${LOKI_COUNCIL_THRESHOLD:-2}
43
43
  COUNCIL_CHECK_INTERVAL=${LOKI_COUNCIL_CHECK_INTERVAL:-5}
44
+ # Guard against zero/negative interval (division by zero in modulo)
45
+ if [ "$COUNCIL_CHECK_INTERVAL" -le 0 ] 2>/dev/null; then
46
+ COUNCIL_CHECK_INTERVAL=5
47
+ fi
44
48
  COUNCIL_MIN_ITERATIONS=${LOKI_COUNCIL_MIN_ITERATIONS:-3}
45
49
  COUNCIL_CONVERGENCE_WINDOW=${LOKI_COUNCIL_CONVERGENCE_WINDOW:-3}
46
50
  COUNCIL_STAGNATION_LIMIT=${LOKI_COUNCIL_STAGNATION_LIMIT:-5}
@@ -271,15 +275,17 @@ council_vote() {
271
275
  while IFS= read -r issue_line; do
272
276
  local issue_severity
273
277
  issue_severity=$(echo "$issue_line" | grep -oE "(CRITICAL|HIGH|MEDIUM|LOW)" | head -1 | tr '[:upper:]' '[:lower:]')
278
+ # Reset per issue line so previous iterations don't poison the check
279
+ threshold_reached=false
274
280
  # Check if this severity meets or exceeds the threshold
275
281
  for sev in $severity_order; do
276
- if [ "$sev" = "$COUNCIL_SEVERITY_THRESHOLD" ]; then
277
- threshold_reached=true
278
- fi
279
282
  if [ "$sev" = "$issue_severity" ] && [ "$threshold_reached" = "false" ]; then
280
283
  has_blocking_issue=true
281
284
  break
282
285
  fi
286
+ if [ "$sev" = "$COUNCIL_SEVERITY_THRESHOLD" ]; then
287
+ threshold_reached=true
288
+ fi
283
289
  done
284
290
  done <<< "$member_issues"
285
291
 
package/autonomy/loki CHANGED
@@ -3424,11 +3424,11 @@ cmd_api() {
3424
3424
  # Start server
3425
3425
  mkdir -p "$LOKI_DIR/logs" "$LOKI_DIR/dashboard"
3426
3426
  local host="${LOKI_DASHBOARD_HOST:-127.0.0.1}"
3427
- local uvicorn_args="--host $host --port $port"
3427
+ local uvicorn_args=("--host" "$host" "--port" "$port")
3428
3428
  if [ -n "${LOKI_TLS_CERT:-}" ] && [ -n "${LOKI_TLS_KEY:-}" ]; then
3429
- uvicorn_args="$uvicorn_args --ssl-certfile ${LOKI_TLS_CERT} --ssl-keyfile ${LOKI_TLS_KEY}"
3429
+ uvicorn_args+=("--ssl-certfile" "${LOKI_TLS_CERT}" "--ssl-keyfile" "${LOKI_TLS_KEY}")
3430
3430
  fi
3431
- LOKI_DIR="$LOKI_DIR" PYTHONPATH="$SKILL_DIR" nohup "$api_python" -m uvicorn dashboard.server:app $uvicorn_args > "$LOKI_DIR/logs/api.log" 2>&1 &
3431
+ LOKI_DIR="$LOKI_DIR" PYTHONPATH="$SKILL_DIR" nohup "$api_python" -m uvicorn dashboard.server:app "${uvicorn_args[@]}" > "$LOKI_DIR/logs/api.log" 2>&1 &
3432
3432
  local new_pid=$!
3433
3433
  echo "$new_pid" > "$pid_file"
3434
3434
 
@@ -5006,7 +5006,7 @@ cmd_migrate_start() {
5006
5006
 
5007
5007
  # Check for path traversal BEFORE canonicalization
5008
5008
  case "$codebase_path" in
5009
- *..*)
5009
+ ../*|*/../*|*/..|--)
5010
5010
  echo -e "${RED}Error: Path traversal not allowed in codebase path${NC}"
5011
5011
  return 1
5012
5012
  ;;
@@ -5062,8 +5062,7 @@ cmd_migrate_start() {
5062
5062
  # Handle resume
5063
5063
  if [ "$resume" = "true" ]; then
5064
5064
  local latest_manifest
5065
- latest_manifest=$(find "$migrations_dir" -name "manifest.json" -maxdepth 2 2>/dev/null | \
5066
- xargs python3 -c "
5065
+ latest_manifest=$(find "$migrations_dir" -name "manifest.json" -maxdepth 2 -exec python3 -c "
5067
5066
  import json, sys
5068
5067
  codebase = sys.argv[1]
5069
5068
  manifests = []
@@ -5076,7 +5075,7 @@ for path in sys.argv[2:]:
5076
5075
  except: pass
5077
5076
  if manifests:
5078
5077
  print(manifests[-1])
5079
- " "$codebase_path" 2>/dev/null || echo "")
5078
+ " "$codebase_path" {} + 2>/dev/null || echo "")
5080
5079
 
5081
5080
  if [ -z "$latest_manifest" ]; then
5082
5081
  echo -e "${RED}Error: No resumable migration found for ${codebase_path}${NC}"
@@ -5239,7 +5238,7 @@ Tasks:
5239
5238
  1. Analyze the full codebase structure (languages, frameworks, dependencies, architecture)
5240
5239
  2. Create the docs directory: mkdir -p ${migration_dir}/docs
5241
5240
  3. Write analysis documentation to ${migration_dir}/docs/analysis.md
5242
- 3. Identify migration seams (logical boundaries for incremental migration) and write them to ${migration_dir}/seams.json as a JSON array of objects with fields: id, name, description, files (array of file paths), dependencies (array of seam ids), priority (high/medium/low)
5241
+ 4. Identify migration seams (logical boundaries for incremental migration) and write them to ${migration_dir}/seams.json as a JSON array of objects with fields: id (string, e.g. 'seam-01'), name (string), description (string), type (string: 'module'/'api'/'config'/'adapter'), files (array of file paths), dependencies (array of seam ids), priority (string: 'high'/'medium'/'low')
5243
5242
 
5244
5243
  You MUST create both files. The migration cannot proceed without them.
5245
5244
  Write the analysis doc first, then the seams.json."
@@ -5255,7 +5254,7 @@ Read ${migration_dir}/docs/analysis.md and ${migration_dir}/seams.json for conte
5255
5254
 
5256
5255
  Tasks:
5257
5256
  1. Identify existing tests and create characterization tests that capture current behavior
5258
- 2. Write ${migration_dir}/features.json as a JSON array of objects with fields: id, name, description, test_command (shell command to verify), passes (boolean, set to true for existing passing behavior)
5257
+ 2. Write ${migration_dir}/features.json as a JSON array of objects with fields: id (string, e.g. 'F01'), category (string, e.g. 'core'), description (string), characterization_test (string, shell command to verify), passes (boolean, set to true for existing passing behavior), risk (string: 'low'/'medium'/'high')
5259
5258
  3. Create a git checkpoint: cd ${codebase_path} && git stash || true
5260
5259
 
5261
5260
  All features in features.json must have passes: true for the gate to pass."
@@ -5270,7 +5269,7 @@ Migration dir: ${migration_dir}
5270
5269
  Read ${migration_dir}/docs/analysis.md, ${migration_dir}/seams.json, and ${migration_dir}/features.json for context.
5271
5270
 
5272
5271
  Tasks:
5273
- 1. Create a migration plan and write it to ${migration_dir}/migration-plan.json with fields: target, source_path, steps (array of objects with: id, name, description, status set to 'completed' after you do the step)
5272
+ 1. Create a migration plan and write it to ${migration_dir}/migration-plan.json as a JSON object with fields: version (integer, default 1), strategy (string: 'incremental' or 'big_bang'), steps (array of objects with: id (string), description (string), type (string: 'refactor'/'rewrite'/'config'/'test'), status (string, set to 'completed' after you do the step))
5274
5273
  2. Execute the actual code migration transforms in ${codebase_path} -- convert code from the current framework/language to ${target}
5275
5274
  3. Update each step status to 'completed' as you finish it
5276
5275
  4. Work incrementally seam by seam from ${migration_dir}/seams.json
@@ -5323,9 +5322,19 @@ except Exception: pass
5323
5322
  gemini)
5324
5323
  (cd "$codebase_path" && gemini --approval-mode=yolo "$phase_prompt" 2>&1) || phase_exit=$?
5325
5324
  ;;
5325
+ *)
5326
+ echo -e "${RED}Error: Unknown provider '${provider_name}'. Supported: claude, codex, gemini${NC}"
5327
+ phase_exit=1
5328
+ ;;
5326
5329
  esac
5327
5330
  fi
5328
5331
 
5332
+ # Check provider exit code before proceeding
5333
+ if [ "$phase_exit" -ne 0 ]; then
5334
+ echo -e "${RED}Error: Provider exited with code $phase_exit during phase $p${NC}"
5335
+ # Don't advance phase -- gate check will catch missing artifacts
5336
+ fi
5337
+
5329
5338
  # Verify phase gate artifacts exist before advancing
5330
5339
  local gate_ok=true
5331
5340
  case "$p" in
@@ -202,7 +202,8 @@ try:
202
202
  if failing_items:
203
203
  detail = ' FAILING: ' + ', '.join(failing_items[:5])
204
204
  waived_str = f', {waived_count} waived' if waived_count > 0 else ''
205
- print(f'{verified}/{total} verified, {failing} failing{waived_str}, {pending} pending.{detail}')
205
+ adjusted_failing = max(0, failing - waived_count)
206
+ print(f'{verified}/{total} verified, {adjusted_failing} failing{waived_str}, {pending} pending.{detail}')
206
207
  except Exception:
207
208
  print('', file=sys.stderr)
208
209
  " 2>/dev/null || echo ""
package/autonomy/run.sh CHANGED
@@ -503,6 +503,8 @@ LAST_WATCHDOG_CHECK=0
503
503
 
504
504
  STATUS_MONITOR_PID=""
505
505
  DASHBOARD_PID=""
506
+ DASHBOARD_LAST_ALIVE=0
507
+ _DASHBOARD_RESTARTING=false
506
508
  RESOURCE_MONITOR_PID=""
507
509
 
508
510
  # SDLC Phase Controls (all enabled by default)
@@ -5521,6 +5523,7 @@ start_dashboard() {
5521
5523
  sleep 2
5522
5524
 
5523
5525
  if kill -0 "$DASHBOARD_PID" 2>/dev/null; then
5526
+ DASHBOARD_LAST_ALIVE=$(date +%s)
5524
5527
  log_info "Dashboard started (PID: $DASHBOARD_PID)"
5525
5528
  log_info "Dashboard: ${CYAN}${url_scheme}://127.0.0.1:$DASHBOARD_PORT/${NC}"
5526
5529
 
@@ -5559,11 +5562,16 @@ stop_dashboard() {
5559
5562
  # Handle dashboard crash: restart silently without triggering pause handler
5560
5563
  # This prevents a killed dashboard from being misinterpreted as a user interrupt
5561
5564
  handle_dashboard_crash() {
5565
+ # Reentrancy guard: prevent recursive restarts from signal handlers
5566
+ if [[ "$_DASHBOARD_RESTARTING" == "true" ]]; then
5567
+ return 0
5568
+ fi
5569
+
5562
5570
  if [[ "${ENABLE_DASHBOARD:-true}" != "true" ]]; then
5563
5571
  return 0
5564
5572
  fi
5565
5573
 
5566
- local dashboard_pid_file=".loki/dashboard/dashboard.pid"
5574
+ local dashboard_pid_file="${TARGET_DIR:-.}/.loki/dashboard/dashboard.pid"
5567
5575
  if [[ ! -f "$dashboard_pid_file" ]]; then
5568
5576
  return 0
5569
5577
  fi
@@ -5598,7 +5606,9 @@ handle_dashboard_crash() {
5598
5606
  "autonomy_mode=$AUTONOMY_MODE"
5599
5607
  DASHBOARD_PID=""
5600
5608
  rm -f "$dashboard_pid_file"
5609
+ _DASHBOARD_RESTARTING=true
5601
5610
  start_dashboard
5611
+ _DASHBOARD_RESTARTING=false
5602
5612
  return 0
5603
5613
  }
5604
5614
 
@@ -5606,17 +5616,31 @@ handle_dashboard_crash() {
5606
5616
  # rather than an actual user interrupt. Returns 0 if it was a child crash
5607
5617
  # (handled silently), 1 if it was a real interrupt.
5608
5618
  is_child_process_signal() {
5609
- # If dashboard PID is set and dashboard is now dead, this signal
5610
- # was likely caused by the dashboard process exiting
5619
+ local dashboard_pid_file="${TARGET_DIR:-.}/.loki/dashboard/dashboard.pid"
5620
+ local now
5621
+ now=$(date +%s)
5622
+
5623
+ # If dashboard PID is set and dashboard is now dead, check timing to
5624
+ # distinguish a real Ctrl+C (which kills both parent and child in the
5625
+ # same process group) from an independent child crash.
5611
5626
  if [ -n "$DASHBOARD_PID" ] && ! kill -0 "$DASHBOARD_PID" 2>/dev/null; then
5627
+ local time_since_alive=$((now - DASHBOARD_LAST_ALIVE))
5628
+ if [ "$DASHBOARD_LAST_ALIVE" -gt 0 ] && [ "$time_since_alive" -lt 2 ]; then
5629
+ # Dashboard was alive very recently -- it likely died from the same
5630
+ # SIGINT that we just received (process group signal). Treat as real
5631
+ # user interrupt, but still restart the dashboard in the background.
5632
+ handle_dashboard_crash
5633
+ return 1
5634
+ fi
5635
+ # Dashboard has been dead for a while -- this is an independent crash
5612
5636
  handle_dashboard_crash
5613
5637
  return 0
5614
5638
  fi
5615
5639
 
5616
5640
  # Check PID file as fallback
5617
- if [ -f ".loki/dashboard/dashboard.pid" ]; then
5641
+ if [ -f "$dashboard_pid_file" ]; then
5618
5642
  local dpid
5619
- dpid=$(cat ".loki/dashboard/dashboard.pid" 2>/dev/null)
5643
+ dpid=$(cat "$dashboard_pid_file" 2>/dev/null)
5620
5644
  if [ -n "$dpid" ] && ! kill -0 "$dpid" 2>/dev/null; then
5621
5645
  handle_dashboard_crash
5622
5646
  return 0
@@ -5937,7 +5961,7 @@ watchdog_check() {
5937
5961
  [[ "$WATCHDOG_ENABLED" != "true" ]] && return 0
5938
5962
 
5939
5963
  # Check dashboard health
5940
- local dashboard_pid_file=".loki/dashboard/dashboard.pid"
5964
+ local dashboard_pid_file="${TARGET_DIR:-.}/.loki/dashboard/dashboard.pid"
5941
5965
  if [[ -f "$dashboard_pid_file" ]]; then
5942
5966
  local dpid
5943
5967
  dpid=$(cat "$dashboard_pid_file" 2>/dev/null)
@@ -5951,8 +5975,13 @@ watchdog_check() {
5951
5975
  # Auto-restart dashboard if it was previously running
5952
5976
  if [[ "${ENABLE_DASHBOARD:-true}" == "true" ]]; then
5953
5977
  log_info "WATCHDOG: Restarting dashboard..."
5978
+ DASHBOARD_PID=""
5979
+ rm -f "$dashboard_pid_file"
5954
5980
  start_dashboard
5955
5981
  fi
5982
+ else
5983
+ # Dashboard is alive -- update last-alive timestamp
5984
+ DASHBOARD_LAST_ALIVE=$(date +%s)
5956
5985
  fi
5957
5986
  fi
5958
5987
 
@@ -7376,7 +7405,20 @@ check_human_intervention() {
7376
7405
  # propagate that as return 2 (stop) instead of always returning 1 (continue).
7377
7406
  if [ -f "$loki_dir/PAUSE" ]; then
7378
7407
  # In perpetual mode: auto-clear PAUSE files and continue without waiting
7408
+ # EXCEPT when PAUSE was created by budget limit enforcement
7379
7409
  if [ "$AUTONOMY_MODE" = "perpetual" ] || [ "$PERPETUAL_MODE" = "true" ]; then
7410
+ if [ -f "$loki_dir/signals/BUDGET_EXCEEDED" ]; then
7411
+ log_warn "PAUSE file created by budget limit - NOT auto-clearing in perpetual mode"
7412
+ log_warn "Budget limit reached. Remove .loki/signals/BUDGET_EXCEEDED and .loki/PAUSE to continue."
7413
+ notify_intervention_needed "Budget limit reached - execution paused" 2>/dev/null || true
7414
+ handle_pause
7415
+ local pause_result=$?
7416
+ rm -f "$loki_dir/PAUSE"
7417
+ if [ "$pause_result" -eq 1 ]; then
7418
+ return 2
7419
+ fi
7420
+ return 1
7421
+ fi
7380
7422
  log_warn "PAUSE file detected but autonomy mode is perpetual - auto-clearing"
7381
7423
  notify_intervention_needed "PAUSE file auto-cleared in perpetual mode" 2>/dev/null || true
7382
7424
  rm -f "$loki_dir/PAUSE" "$loki_dir/PAUSED.md"
@@ -7625,7 +7667,7 @@ except (json.JSONDecodeError, OSError): pass
7625
7667
  # rather than an actual user interrupt. In that case, handle silently.
7626
7668
  if is_child_process_signal; then
7627
7669
  log_info "Child process exit detected, handled silently"
7628
- INTERRUPT_COUNT=0
7670
+ # Do NOT reset INTERRUPT_COUNT -- preserves double-Ctrl+C escape capability
7629
7671
  return
7630
7672
  fi
7631
7673
 
@@ -7,7 +7,7 @@ Modules:
7
7
  control: Session control API (start/stop/pause/resume)
8
8
  """
9
9
 
10
- __version__ = "5.58.0"
10
+ __version__ = "5.58.2"
11
11
 
12
12
  # Expose the control app for easy import
13
13
  try:
@@ -259,7 +259,7 @@ def get_status() -> StatusResponse:
259
259
  age_hours = (datetime.now(timezone.utc) - start_time_parsed).total_seconds() / 3600
260
260
  if age_hours > 6:
261
261
  session_data["status"] = "stopped"
262
- session_file.write_text(json.dumps(session_data))
262
+ atomic_write_json(session_file, session_data, use_lock=True)
263
263
  else:
264
264
  running = True
265
265
  except (ValueError, TypeError):
@@ -589,8 +589,9 @@ async def get_logs(lines: int = 50):
589
589
  Get recent log lines from the session log.
590
590
 
591
591
  Args:
592
- lines: Number of lines to return (default 50)
592
+ lines: Number of lines to return (default 50, max 10000)
593
593
  """
594
+ lines = min(max(lines, 1), 10000)
594
595
  log_file = LOG_DIR / "session.log"
595
596
 
596
597
  if not log_file.exists():
@@ -14,7 +14,7 @@ import re
14
14
  import subprocess
15
15
  import tempfile
16
16
  import threading
17
- from dataclasses import asdict, dataclass, field
17
+ from dataclasses import asdict, dataclass, field, fields
18
18
  from datetime import datetime, timezone
19
19
  from pathlib import Path
20
20
  from typing import Any, Optional
@@ -38,8 +38,8 @@ class Feature:
38
38
  """Individual feature tracked during migration."""
39
39
 
40
40
  id: str
41
- category: str
42
- description: str
41
+ category: str = ""
42
+ description: str = ""
43
43
  verification_steps: list[str] = field(default_factory=list)
44
44
  passes: bool = False
45
45
  characterization_test: str = ""
@@ -52,8 +52,8 @@ class MigrationStep:
52
52
  """Single step in a migration plan."""
53
53
 
54
54
  id: str
55
- description: str
56
- type: str # e.g. "refactor", "rewrite", "config", "test"
55
+ description: str = ""
56
+ type: str = "" # e.g. "refactor", "rewrite", "config", "test"
57
57
  files: list[str] = field(default_factory=list)
58
58
  tests_required: list[str] = field(default_factory=list)
59
59
  estimated_tokens: int = 0
@@ -81,9 +81,14 @@ class SeamInfo:
81
81
  """Detected seam (boundary/interface) in the codebase."""
82
82
 
83
83
  id: str
84
- type: str # e.g. "api", "module", "database", "config"
85
- location: str
86
- description: str
84
+ description: str = ""
85
+ type: str = "" # e.g. "api", "module", "database", "config"
86
+ location: str = ""
87
+ name: str = ""
88
+ priority: str = "medium"
89
+ files: list[str] = field(default_factory=list)
90
+ dependencies: list[str] = field(default_factory=list)
91
+ complexity: str = ""
87
92
  confidence: float = 0.0
88
93
  suggested_interface: str = ""
89
94
 
@@ -353,9 +358,16 @@ class MigrationPipeline:
353
358
  features_path = self.migration_dir / "features.json"
354
359
  try:
355
360
  data = json.loads(features_path.read_text(encoding="utf-8"))
356
- features = [Feature(**f) for f in data]
361
+ # Handle both flat list and {"features": [...]} wrapper
362
+ if isinstance(data, dict):
363
+ data = data.get("features", [])
364
+ # Filter to known Feature fields to tolerate extra keys
365
+ _feature_fields = {f.name for f in fields(Feature)}
366
+ features = [Feature(**{k: v for k, v in f.items() if k in _feature_fields}) for f in data]
357
367
  except FileNotFoundError:
358
368
  return False, "Phase gate failed: features.json not found"
369
+ except (json.JSONDecodeError, TypeError) as exc:
370
+ return False, f"Phase gate failed: features.json is invalid: {exc}"
359
371
  if not features:
360
372
  return False, "No features defined"
361
373
  failing = [f for f in features if not f.passes]
@@ -369,11 +381,16 @@ class MigrationPipeline:
369
381
  plan_path = self.migration_dir / "migration-plan.json"
370
382
  try:
371
383
  data = json.loads(plan_path.read_text(encoding="utf-8"))
372
- steps_data = data.pop("steps", [])
373
- plan = MigrationPlan(**data)
374
- plan.steps = [MigrationStep(**s) for s in steps_data]
384
+ steps_data = data.get("steps", [])
385
+ _plan_fields = {f.name for f in fields(MigrationPlan)}
386
+ _step_fields = {f.name for f in fields(MigrationStep)}
387
+ plan_data = {k: v for k, v in data.items() if k in _plan_fields and k != "steps"}
388
+ plan = MigrationPlan(**plan_data)
389
+ plan.steps = [MigrationStep(**{k: v for k, v in s.items() if k in _step_fields}) for s in steps_data]
375
390
  except FileNotFoundError:
376
391
  return False, "Phase gate failed: migration-plan.json not found"
392
+ except (json.JSONDecodeError, TypeError) as exc:
393
+ return False, f"Phase gate failed: migration-plan.json is invalid: {exc}"
377
394
  incomplete = [s for s in plan.steps if s.status != "completed"]
378
395
  if incomplete:
379
396
  ids = ", ".join(s.id for s in incomplete[:5])
@@ -496,7 +513,12 @@ class MigrationPipeline:
496
513
  with self._lock:
497
514
  try:
498
515
  data = json.loads(features_path.read_text(encoding="utf-8"))
499
- return [Feature(**f) for f in data]
516
+ # Handle both flat list and {"features": [...]} wrapper
517
+ if isinstance(data, dict):
518
+ data = data.get("features", [])
519
+ # Filter to known Feature fields to tolerate extra keys
520
+ _feature_fields = {f.name for f in fields(Feature)}
521
+ return [Feature(**{k: v for k, v in f.items() if k in _feature_fields}) for f in data]
500
522
  except FileNotFoundError:
501
523
  logger.warning("Features file not found: %s", features_path)
502
524
  raise
@@ -523,9 +545,12 @@ class MigrationPipeline:
523
545
  try:
524
546
  data = json.loads(plan_path.read_text(encoding="utf-8"))
525
547
  # Reconstruct nested MigrationStep objects
526
- steps_data = data.pop("steps", [])
527
- plan = MigrationPlan(**data)
528
- plan.steps = [MigrationStep(**s) for s in steps_data]
548
+ steps_data = data.get("steps", [])
549
+ _plan_fields = {f.name for f in fields(MigrationPlan)}
550
+ _step_fields = {f.name for f in fields(MigrationStep)}
551
+ plan_data = {k: v for k, v in data.items() if k in _plan_fields and k != "steps"}
552
+ plan = MigrationPlan(**plan_data)
553
+ plan.steps = [MigrationStep(**{k: v for k, v in s.items() if k in _step_fields}) for s in steps_data]
529
554
  return plan
530
555
  except FileNotFoundError:
531
556
  logger.warning("Plan file not found: %s", plan_path)
@@ -550,7 +575,11 @@ class MigrationPipeline:
550
575
  with self._lock:
551
576
  try:
552
577
  data = json.loads(seams_path.read_text(encoding="utf-8"))
553
- return [SeamInfo(**s) for s in data]
578
+ # Handle both flat list and {"seams": [...]} wrapper
579
+ if isinstance(data, dict):
580
+ data = data.get("seams", [])
581
+ _seam_fields = {f.name for f in fields(SeamInfo)}
582
+ return [SeamInfo(**{k: v for k, v in s.items() if k in _seam_fields}) for s in data]
554
583
  except FileNotFoundError:
555
584
  logger.warning("Seams file not found: %s", seams_path)
556
585
  raise
@@ -55,7 +55,7 @@ from .activity_logger import get_activity_logger
55
55
  try:
56
56
  from . import __version__ as _version
57
57
  except ImportError:
58
- _version = "5.39.0"
58
+ _version = "5.58.2"
59
59
 
60
60
  # ---------------------------------------------------------------------------
61
61
  # TLS Configuration (optional - disabled by default)