loki-mode 6.14.0 → 6.15.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -9,7 +9,7 @@
9
9
  [![Agent Types](https://img.shields.io/badge/Agent%20Types-41-blue)]()
10
10
  [![Autonomi](https://img.shields.io/badge/Autonomi-autonomi.dev-5B4EEA)](https://www.autonomi.dev/)
11
11
 
12
- **Current Version: v6.14.0**
12
+ **Current Version: v6.15.1**
13
13
 
14
14
  ---
15
15
 
package/SKILL.md CHANGED
@@ -3,7 +3,7 @@ name: loki-mode
3
3
  description: Multi-agent autonomous startup system. Triggers on "Loki Mode". Takes PRD to deployed product with minimal human intervention. Requires --dangerously-skip-permissions flag.
4
4
  ---
5
5
 
6
- # Loki Mode v6.14.0
6
+ # Loki Mode v6.15.1
7
7
 
8
8
  **You are an autonomous agent. You make decisions. You do not ask questions. You do not stop.**
9
9
 
@@ -267,4 +267,4 @@ The following features are documented in skill modules but not yet fully automat
267
267
  | Quality gates 3-reviewer system | Implemented (v5.35.0) | 5 specialist reviewers in `skills/quality-gates.md`; execution in run.sh |
268
268
  | Benchmarks (HumanEval, SWE-bench) | Infrastructure only | Runner scripts and datasets exist in `benchmarks/`; no published results |
269
269
 
270
- **v6.14.0 | [Autonomi](https://www.autonomi.dev/) flagship product | ~260 lines core**
270
+ **v6.15.1 | [Autonomi](https://www.autonomi.dev/) flagship product | ~260 lines core**
package/VERSION CHANGED
@@ -1 +1 @@
1
- 6.14.0
1
+ 6.15.1
@@ -147,6 +147,7 @@ class BmadArtifacts:
147
147
  self.prd_path: Optional[Path] = None
148
148
  self.architecture_path: Optional[Path] = None
149
149
  self.epics_path: Optional[Path] = None
150
+ self.sprint_status_path: Optional[Path] = None
150
151
  self.output_dir: Optional[Path] = None
151
152
  self.errors: List[str] = []
152
153
  self._discover()
@@ -202,6 +203,11 @@ class BmadArtifacts:
202
203
  if epics_path.exists():
203
204
  self.epics_path = epics_path
204
205
 
206
+ # Find sprint-status.yml (optional)
207
+ sprint_path = self.output_dir / "sprint-status.yml"
208
+ if sprint_path.exists():
209
+ self.sprint_status_path = sprint_path
210
+
205
211
  @property
206
212
  def is_valid(self) -> bool:
207
213
  """True if at least a PRD was found."""
@@ -213,6 +219,7 @@ class BmadArtifacts:
213
219
  "prd": str(self.prd_path) if self.prd_path else None,
214
220
  "architecture": str(self.architecture_path) if self.architecture_path else None,
215
221
  "epics": str(self.epics_path) if self.epics_path else None,
222
+ "sprint_status": str(self.sprint_status_path) if self.sprint_status_path else None,
216
223
  }
217
224
 
218
225
 
@@ -438,6 +445,73 @@ def parse_epics(epics_path: Path) -> List[Dict[str, Any]]:
438
445
  return epics
439
446
 
440
447
 
448
+ # -- Sprint Status Parsing (stdlib-only YAML) ---------------------------------
449
+
450
+ def parse_sprint_status(path: Path) -> set:
451
+ """Parse sprint-status.yml and return a set of completed story names.
452
+
453
+ Uses a simple line-by-line parser for the specific BMAD sprint-status
454
+ format (no PyYAML dependency). Recognizes stories with status
455
+ 'completed' or 'done' (case-insensitive).
456
+
457
+ Expected format:
458
+ epics:
459
+ - name: "Epic Name"
460
+ status: in-progress
461
+ stories:
462
+ - name: "Story title"
463
+ status: completed
464
+ """
465
+ text = _safe_read(path)
466
+ completed: set = set()
467
+ current_name: Optional[str] = None
468
+ in_stories = False
469
+
470
+ for line in text.split("\n"):
471
+ stripped = line.strip()
472
+ if not stripped or stripped.startswith("#"):
473
+ continue
474
+
475
+ # Detect stories: block start
476
+ if stripped == "stories:":
477
+ in_stories = True
478
+ current_name = None
479
+ continue
480
+
481
+ # Detect epics: block start (reset stories context)
482
+ if stripped == "epics:":
483
+ in_stories = False
484
+ current_name = None
485
+ continue
486
+
487
+ # Top-level list item under epics resets stories context
488
+ # (indentation: "  - name:" for epics vs "      - name:" for stories)
489
+ indent = len(line) - len(line.lstrip())
490
+
491
+ if in_stories:
492
+ # Story name line: " - name: ..." or " name: ..."
493
+ name_match = re.match(r'^-?\s*name:\s*["\']?(.*?)["\']?\s*$', stripped)
494
+ if name_match:
495
+ current_name = name_match.group(1).strip()
496
+ continue
497
+
498
+ # Story status line
499
+ status_match = re.match(r'^status:\s*["\']?(.*?)["\']?\s*$', stripped)
500
+ if status_match:
501
+ status = status_match.group(1).strip().lower()
502
+ if status in ("completed", "done") and current_name:
503
+ completed.add(current_name)
504
+ current_name = None
505
+ continue
506
+
507
+ # A new epic-level item resets context
508
+ if indent <= 4 and stripped.startswith("- name:"):
509
+ in_stories = False
510
+ current_name = None
511
+
512
+ return completed
513
+
514
+
441
515
  # -- Architecture Summary -----------------------------------------------------
442
516
 
443
517
  def summarize_architecture(arch_path: Path) -> str:
@@ -593,6 +667,7 @@ def write_outputs(
593
667
  arch_summary: Optional[str],
594
668
  tasks_json: Optional[List[Dict[str, Any]]],
595
669
  validation_report: Optional[List[Dict[str, str]]],
670
+ completed_stories: Optional[set] = None,
596
671
  ) -> List[str]:
597
672
  """Write all output files to the specified directory.
598
673
 
@@ -633,6 +708,12 @@ def write_outputs(
633
708
  _write_atomic(val_path, "\n".join(val_lines) + "\n")
634
709
  written.append(str(val_path))
635
710
 
711
+ # bmad-completed-stories.json
712
+ if completed_stories:
713
+ completed_path = output_dir / "bmad-completed-stories.json"
714
+ _write_atomic(completed_path, json.dumps(sorted(completed_stories), indent=2))
715
+ written.append(str(completed_path))
716
+
636
717
  return written
637
718
 
638
719
 
@@ -674,6 +755,11 @@ def run(
674
755
  if artifacts.epics_path:
675
756
  epics_data = parse_epics(artifacts.epics_path)
676
757
 
758
+ # 4b. Parse sprint status (optional)
759
+ completed_stories: Optional[set] = None
760
+ if artifacts.sprint_status_path:
761
+ completed_stories = parse_sprint_status(artifacts.sprint_status_path)
762
+
677
763
  # 5. Build combined metadata
678
764
  combined_metadata: Dict[str, Any] = {
679
765
  "project_classification": classification,
@@ -714,6 +800,7 @@ def run(
714
800
  arch_summary=arch_summary,
715
801
  tasks_json=epics_data,
716
802
  validation_report=validation_report,
803
+ completed_stories=completed_stories,
717
804
  )
718
805
 
719
806
  print(f"BMAD adapter: processed {artifacts.prd_path}")
@@ -722,7 +809,10 @@ def run(
722
809
  print(f" Classification: {classification.get('project_type', 'unknown')} / {classification.get('complexity', 'unknown')}")
723
810
  print(f" Artifacts: PRD={'found' if artifacts.prd_path else 'MISSING'}, "
724
811
  f"Architecture={'found' if artifacts.architecture_path else 'missing'}, "
725
- f"Epics={'found' if artifacts.epics_path else 'missing'}")
812
+ f"Epics={'found' if artifacts.epics_path else 'missing'}, "
813
+ f"SprintStatus={'found' if artifacts.sprint_status_path else 'missing'}")
814
+ if completed_stories:
815
+ print(f" Completed stories (will skip): {len(completed_stories)}")
726
816
  print(f" Output files written to {abs_output_dir}/:")
727
817
  for path in written:
728
818
  print(f" - {Path(path).name}")
package/autonomy/loki CHANGED
@@ -756,7 +756,7 @@ cmd_start() {
756
756
 
757
757
  # Run the BMAD adapter to normalize artifacts
758
758
  echo -e "${CYAN}Running BMAD adapter...${NC}"
759
- local adapter_script="${SCRIPT_DIR:-$(dirname "$0")}/bmad-adapter.py"
759
+ local adapter_script="$(dirname "$(resolve_script_path "$0")")/bmad-adapter.py"
760
760
  if [[ ! -f "$adapter_script" ]]; then
761
761
  echo -e "${RED}Error: BMAD adapter not found at $adapter_script${NC}"
762
762
  echo "Please ensure autonomy/bmad-adapter.py exists."
@@ -808,7 +808,7 @@ cmd_start() {
808
808
 
809
809
  # Run the OpenSpec adapter to normalize artifacts
810
810
  echo -e "${CYAN}Running OpenSpec adapter...${NC}"
811
- local adapter_script="${SCRIPT_DIR:-$(dirname "$0")}/openspec-adapter.py"
811
+ local adapter_script="$(dirname "$(resolve_script_path "$0")")/openspec-adapter.py"
812
812
  if [[ ! -f "$adapter_script" ]]; then
813
813
  echo -e "${RED}Error: OpenSpec adapter not found at $adapter_script${NC}"
814
814
  echo "Please ensure autonomy/openspec-adapter.py exists."
package/autonomy/run.sh CHANGED
@@ -7267,6 +7267,83 @@ except Exception as e:
7267
7267
  PYEOF
7268
7268
  }
7269
7269
 
7270
+ # Automatic episode capture with enriched context (v6.15.0)
7271
+ # Captures git changes, files modified, and RARV phase automatically
7272
+ # after every iteration -- no manual invocation needed.
7273
+ auto_capture_episode() {
7274
+ local iteration="$1"
7275
+ local exit_code="$2"
7276
+ local rarv_phase="$3"
7277
+ local goal="$4"
7278
+ local duration="$5"
7279
+ local log_file="$6"
7280
+ local target_dir="${TARGET_DIR:-.}"
7281
+
7282
+ # Only capture if memory system exists
7283
+ if [ ! -d "$target_dir/.loki/memory" ]; then
7284
+ return
7285
+ fi
7286
+
7287
+ # Collect git context: files modified in this iteration
7288
+ local files_modified=""
7289
+ files_modified=$(cd "$target_dir" && git diff --name-only HEAD 2>/dev/null | head -20 | tr '\n' '|' || true)
7290
+
7291
+ # Collect last git commit if any
7292
+ local git_commit=""
7293
+ git_commit=$(cd "$target_dir" && git rev-parse --short HEAD 2>/dev/null || true)
7294
+
7295
+ # Determine outcome
7296
+ local outcome="success"
7297
+ if [ "$exit_code" -ne 0 ]; then
7298
+ outcome="failure"
7299
+ fi
7300
+
7301
+ # Pass all context via environment variables (prevents injection)
7302
+ _LOKI_PROJECT_DIR="$PROJECT_DIR" _LOKI_TARGET_DIR="$target_dir" \
7303
+ _LOKI_ITERATION="$iteration" _LOKI_EXIT_CODE="$exit_code" \
7304
+ _LOKI_RARV_PHASE="$rarv_phase" _LOKI_GOAL="$goal" \
7305
+ _LOKI_DURATION="$duration" _LOKI_OUTCOME="$outcome" \
7306
+ _LOKI_FILES_MODIFIED="$files_modified" _LOKI_GIT_COMMIT="$git_commit" \
7307
+ python3 << 'PYEOF' 2>/dev/null || true
7308
+ import sys
7309
+ import os
7310
+
7311
+ project_dir = os.environ.get('_LOKI_PROJECT_DIR', '')
7312
+ target_dir = os.environ.get('_LOKI_TARGET_DIR', '.')
7313
+ iteration = os.environ.get('_LOKI_ITERATION', '0')
7314
+ rarv_phase = os.environ.get('_LOKI_RARV_PHASE', 'iteration')
7315
+ goal = os.environ.get('_LOKI_GOAL', '')
7316
+ duration = os.environ.get('_LOKI_DURATION', '0')
7317
+ outcome = os.environ.get('_LOKI_OUTCOME', 'success')
7318
+ files_modified = os.environ.get('_LOKI_FILES_MODIFIED', '')
7319
+ git_commit = os.environ.get('_LOKI_GIT_COMMIT', '')
7320
+
7321
+ sys.path.insert(0, project_dir)
7322
+ try:
7323
+ from memory.engine import MemoryEngine, create_storage
7324
+ from memory.schemas import EpisodeTrace
7325
+
7326
+ storage = create_storage(f'{target_dir}/.loki/memory')
7327
+ engine = MemoryEngine(storage=storage, base_path=f'{target_dir}/.loki/memory')
7328
+ engine.initialize()
7329
+
7330
+ trace = EpisodeTrace.create(
7331
+ task_id=f'iteration-{iteration}',
7332
+ agent='loki-orchestrator',
7333
+ phase=rarv_phase.upper() if rarv_phase else 'ACT',
7334
+ goal=goal,
7335
+ )
7336
+ trace.outcome = outcome
7337
+ trace.duration_seconds = int(duration) if duration.isdigit() else 0
7338
+ trace.git_commit = git_commit if git_commit else None
7339
+ trace.files_modified = [f for f in files_modified.split('|') if f] if files_modified else []
7340
+
7341
+ engine.store_episode(trace)
7342
+ except Exception:
7343
+ pass # Silently fail -- memory capture must never break the loop
7344
+ PYEOF
7345
+ }
7346
+
7270
7347
  # Run memory consolidation pipeline
7271
7348
  run_memory_consolidation() {
7272
7349
  local target_dir="${TARGET_DIR:-.}"
@@ -7802,6 +7879,7 @@ import sys
7802
7879
 
7803
7880
  bmad_tasks_path = ".loki/bmad-tasks.json"
7804
7881
  pending_path = ".loki/queue/pending.json"
7882
+ completed_stories_path = ".loki/bmad-completed-stories.json"
7805
7883
 
7806
7884
  try:
7807
7885
  with open(bmad_tasks_path, "r") as f:
@@ -7810,6 +7888,17 @@ except (json.JSONDecodeError, FileNotFoundError) as e:
7810
7888
  print(f"Warning: Could not read BMAD tasks: {e}", file=sys.stderr)
7811
7889
  sys.exit(0)
7812
7890
 
7891
+ # Load completed stories from sprint-status (if available)
7892
+ completed_stories = set()
7893
+ if os.path.exists(completed_stories_path):
7894
+ try:
7895
+ with open(completed_stories_path, "r") as f:
7896
+ completed_list = json.load(f)
7897
+ if isinstance(completed_list, list):
7898
+ completed_stories = {s.lower() for s in completed_list if isinstance(s, str)}
7899
+ except (json.JSONDecodeError, FileNotFoundError):
7900
+ pass
7901
+
7813
7902
  # Extract stories from BMAD structure
7814
7903
  # Supports both flat list and nested epic/story format
7815
7904
  stories = []
@@ -7837,6 +7926,21 @@ if not stories:
7837
7926
  print("No BMAD stories found to queue", file=sys.stderr)
7838
7927
  sys.exit(0)
7839
7928
 
7929
+ # Filter out completed stories from sprint-status
7930
+ skipped_count = 0
7931
+ if completed_stories:
7932
+ filtered = []
7933
+ for story in stories:
7934
+ if isinstance(story, dict):
7935
+ title = story.get("title", story.get("name", "")).lower()
7936
+ if title and title in completed_stories:
7937
+ skipped_count += 1
7938
+ continue
7939
+ filtered.append(story)
7940
+ stories = filtered
7941
+ if skipped_count > 0:
7942
+ print(f"Skipped {skipped_count} completed stories (from sprint-status.yml)", file=sys.stderr)
7943
+
7840
7944
  # Load existing pending tasks (if any)
7841
7945
  existing = []
7842
7946
  if os.path.exists(pending_path):
@@ -7877,7 +7981,10 @@ for i, story in enumerate(stories):
7877
7981
  with open(pending_path, "w") as f:
7878
7982
  json.dump(existing, f, indent=2)
7879
7983
 
7880
- print(f"Added {len(stories)} BMAD stories to task queue")
7984
+ msg = f"Added {len(stories)} BMAD stories to task queue"
7985
+ if skipped_count > 0:
7986
+ msg += f" (skipped {skipped_count} completed)"
7987
+ print(msg)
7881
7988
  BMAD_QUEUE_EOF
7882
7989
 
7883
7990
  if [[ $? -ne 0 ]]; then
@@ -8614,13 +8721,15 @@ if __name__ == "__main__":
8614
8721
  fi
8615
8722
  fi
8616
8723
 
8724
+ # Automatic episode capture after every RARV iteration (v6.15.0)
8725
+ # Captures RARV phase, git changes, and iteration context automatically
8726
+ auto_capture_episode "$ITERATION_COUNT" "$exit_code" "${rarv_phase:-iteration}" \
8727
+ "${prd_path:-codebase-analysis}" "$duration" "$log_file"
8728
+
8617
8729
  # Check for success - ONLY stop on explicit completion promise
8618
8730
  # There's never a "complete" product - always improvements, bugs, features
8619
8731
  if [ $exit_code -eq 0 ]; then
8620
- # Store episode trace for successful iteration
8621
- local task_id="iteration-$ITERATION_COUNT"
8622
- local goal_desc="${prd_path:-codebase-analysis}"
8623
- store_episode_trace "$task_id" "success" "iteration" "$goal_desc" "$duration"
8732
+ # Episode trace already captured by auto_capture_episode above (v6.15.0)
8624
8733
 
8625
8734
  # Track iteration for Completion Council convergence detection
8626
8735
  if type council_track_iteration &>/dev/null; then
@@ -8673,10 +8782,7 @@ if __name__ == "__main__":
8673
8782
  fi
8674
8783
 
8675
8784
  # Only apply retry logic for ERRORS (non-zero exit code)
8676
- # Store episode trace for failed iteration (useful for learning from failures)
8677
- local task_id="iteration-$ITERATION_COUNT"
8678
- local goal_desc="${prd_path:-codebase-analysis}"
8679
- store_episode_trace "$task_id" "failure" "iteration" "$goal_desc" "$duration"
8785
+ # Episode trace already captured by auto_capture_episode above (v6.15.0)
8680
8786
 
8681
8787
  # Checkpoint failed iteration state (v5.57.0)
8682
8788
  create_checkpoint "iteration-${ITERATION_COUNT} failed (exit=$exit_code)" "iteration-${ITERATION_COUNT}-fail"
@@ -7,7 +7,7 @@ Modules:
7
7
  control: Session control API (start/stop/pause/resume)
8
8
  """
9
9
 
10
- __version__ = "6.14.0"
10
+ __version__ = "6.15.1"
11
11
 
12
12
  # Expose the control app for easy import
13
13
  try:
@@ -1622,15 +1622,51 @@ def _sanitize_agent_id(agent_id: str) -> str:
1622
1622
  @app.get("/api/memory/summary")
1623
1623
  async def get_memory_summary():
1624
1624
  """Get memory system summary from .loki/memory/."""
1625
+ # Try SQLite backend first for accurate counts
1626
+ storage = _get_memory_storage()
1627
+ if storage is not None:
1628
+ try:
1629
+ stats = storage.get_stats()
1630
+ summary = {
1631
+ "episodic": {"count": stats.get("episode_count", 0), "latestDate": None},
1632
+ "semantic": {"patterns": stats.get("pattern_count", 0), "antiPatterns": 0},
1633
+ "procedural": {"skills": stats.get("skill_count", 0)},
1634
+ "backend": "sqlite",
1635
+ }
1636
+ # Get latest episode date
1637
+ episode_ids = storage.list_episodes(limit=1)
1638
+ if episode_ids:
1639
+ ep = storage.load_episode(episode_ids[0])
1640
+ if ep:
1641
+ summary["episodic"]["latestDate"] = ep.get("timestamp", "")
1642
+ # Token economics from JSON (not in SQLite)
1643
+ econ_file = _get_loki_dir() / "memory" / "token_economics.json"
1644
+ if econ_file.exists():
1645
+ try:
1646
+ econ = json.loads(econ_file.read_text())
1647
+ summary["tokenEconomics"] = {
1648
+ "discoveryTokens": econ.get("discoveryTokens", 0),
1649
+ "readTokens": econ.get("readTokens", 0),
1650
+ "savingsPercent": econ.get("savingsPercent", 0),
1651
+ }
1652
+ except Exception:
1653
+ summary["tokenEconomics"] = {"discoveryTokens": 0, "readTokens": 0, "savingsPercent": 0}
1654
+ else:
1655
+ summary["tokenEconomics"] = {"discoveryTokens": 0, "readTokens": 0, "savingsPercent": 0}
1656
+ return summary
1657
+ except Exception:
1658
+ pass
1659
+
1660
+ # Fallback to JSON file-based counts
1625
1661
  memory_dir = _get_loki_dir() / "memory"
1626
1662
  summary = {
1627
1663
  "episodic": {"count": 0, "latestDate": None},
1628
1664
  "semantic": {"patterns": 0, "antiPatterns": 0},
1629
1665
  "procedural": {"skills": 0},
1630
1666
  "tokenEconomics": {"discoveryTokens": 0, "readTokens": 0, "savingsPercent": 0},
1667
+ "backend": "json",
1631
1668
  }
1632
1669
 
1633
- # Count episodic memories
1634
1670
  ep_dir = memory_dir / "episodic"
1635
1671
  if ep_dir.exists():
1636
1672
  episodes = sorted(ep_dir.glob("*.json"))
@@ -1642,7 +1678,6 @@ async def get_memory_summary():
1642
1678
  except Exception:
1643
1679
  pass
1644
1680
 
1645
- # Count semantic patterns
1646
1681
  sem_dir = memory_dir / "semantic"
1647
1682
  patterns_file = sem_dir / "patterns.json"
1648
1683
  anti_file = sem_dir / "anti-patterns.json"
@@ -1659,12 +1694,10 @@ async def get_memory_summary():
1659
1694
  except Exception:
1660
1695
  pass
1661
1696
 
1662
- # Count skills
1663
1697
  skills_dir = memory_dir / "skills"
1664
1698
  if skills_dir.exists():
1665
1699
  summary["procedural"]["skills"] = len(list(skills_dir.glob("*.json")))
1666
1700
 
1667
- # Token economics
1668
1701
  econ_file = memory_dir / "token_economics.json"
1669
1702
  if econ_file.exists():
1670
1703
  try:
@@ -1683,6 +1716,21 @@ async def get_memory_summary():
1683
1716
  @app.get("/api/memory/episodes")
1684
1717
  async def list_episodes(limit: int = Query(default=50, ge=1, le=1000)):
1685
1718
  """List episodic memory entries."""
1719
+ # Try SQLite backend first
1720
+ storage = _get_memory_storage()
1721
+ if storage is not None:
1722
+ try:
1723
+ ids = storage.list_episodes(limit=limit)
1724
+ episodes = []
1725
+ for eid in ids:
1726
+ ep = storage.load_episode(eid)
1727
+ if ep:
1728
+ episodes.append(ep)
1729
+ return episodes
1730
+ except Exception:
1731
+ pass
1732
+
1733
+ # Fallback to JSON files
1686
1734
  ep_dir = _get_loki_dir() / "memory" / "episodic"
1687
1735
  episodes = []
1688
1736
  if ep_dir.exists():
@@ -1698,11 +1746,21 @@ async def list_episodes(limit: int = Query(default=50, ge=1, le=1000)):
1698
1746
  @app.get("/api/memory/episodes/{episode_id}")
1699
1747
  async def get_episode(episode_id: str):
1700
1748
  """Get a specific episodic memory entry."""
1749
+ # Try SQLite first
1750
+ storage = _get_memory_storage()
1751
+ if storage is not None:
1752
+ try:
1753
+ ep = storage.load_episode(episode_id)
1754
+ if ep:
1755
+ return ep
1756
+ except Exception:
1757
+ pass
1758
+
1759
+ # Fallback to JSON files
1701
1760
  loki_dir = _get_loki_dir()
1702
1761
  ep_dir = loki_dir / "memory" / "episodic"
1703
1762
  if not ep_dir.exists():
1704
1763
  raise HTTPException(status_code=404, detail="Episode not found")
1705
- # Try direct filename match
1706
1764
  for f in ep_dir.glob("*.json"):
1707
1765
  resolved = os.path.realpath(f)
1708
1766
  if not resolved.startswith(os.path.realpath(str(loki_dir))):
@@ -1719,6 +1777,21 @@ async def get_episode(episode_id: str):
1719
1777
  @app.get("/api/memory/patterns")
1720
1778
  async def list_patterns():
1721
1779
  """List semantic patterns."""
1780
+ # Try SQLite first
1781
+ storage = _get_memory_storage()
1782
+ if storage is not None:
1783
+ try:
1784
+ ids = storage.list_patterns()
1785
+ patterns = []
1786
+ for pid in ids:
1787
+ p = storage.load_pattern(pid)
1788
+ if p:
1789
+ patterns.append(p)
1790
+ return patterns
1791
+ except Exception:
1792
+ pass
1793
+
1794
+ # Fallback to JSON
1722
1795
  sem_dir = _get_loki_dir() / "memory" / "semantic"
1723
1796
  patterns_file = sem_dir / "patterns.json"
1724
1797
  if patterns_file.exists():
@@ -1743,6 +1816,21 @@ async def get_pattern(pattern_id: str):
1743
1816
  @app.get("/api/memory/skills")
1744
1817
  async def list_skills():
1745
1818
  """List procedural skills."""
1819
+ # Try SQLite first
1820
+ storage = _get_memory_storage()
1821
+ if storage is not None:
1822
+ try:
1823
+ ids = storage.list_skills()
1824
+ skills = []
1825
+ for sid in ids:
1826
+ s = storage.load_skill(sid)
1827
+ if s:
1828
+ skills.append(s)
1829
+ return skills
1830
+ except Exception:
1831
+ pass
1832
+
1833
+ # Fallback to JSON
1746
1834
  skills_dir = _get_loki_dir() / "memory" / "skills"
1747
1835
  skills = []
1748
1836
  if skills_dir.exists():
@@ -1824,6 +1912,106 @@ async def get_memory_timeline():
1824
1912
  return {"entries": episodes, "lastUpdated": None}
1825
1913
 
1826
1914
 
1915
+ # ---------------------------------------------------------------------------
1916
+ # Memory Search & Stats (v6.15.0) - SQLite FTS5 powered
1917
+ # ---------------------------------------------------------------------------
1918
+
1919
+ def _get_memory_storage():
1920
+ """Get the best available memory storage backend (SQLite preferred)."""
1921
+ memory_dir = _get_loki_dir() / "memory"
1922
+ base_path = str(memory_dir)
1923
+ try:
1924
+ import sys
1925
+ project_root = str(_Path(__file__).resolve().parent.parent)
1926
+ if project_root not in sys.path:
1927
+ sys.path.insert(0, project_root)
1928
+ from memory.sqlite_storage import SQLiteMemoryStorage
1929
+ return SQLiteMemoryStorage(base_path=base_path)
1930
+ except Exception:
1931
+ return None
1932
+
1933
+
1934
+ @app.get("/api/memory/search")
1935
+ async def search_memory(
1936
+ q: str = Query(..., min_length=1, max_length=500, description="Search query"),
1937
+ collection: str = Query(default="all", regex="^(episodes|patterns|skills|all)$"),
1938
+ limit: int = Query(default=20, ge=1, le=100),
1939
+ ):
1940
+ """Full-text search across memory using FTS5."""
1941
+ storage = _get_memory_storage()
1942
+ if storage is None:
1943
+ return {"results": [], "message": "SQLite memory backend not available"}
1944
+
1945
+ try:
1946
+ results = storage.search_fts(q, collection=collection, limit=limit)
1947
+ compact = []
1948
+ for r in results:
1949
+ entry = {
1950
+ "id": r.get("id", ""),
1951
+ "type": r.get("_type", "unknown"),
1952
+ "summary": (
1953
+ r.get("goal", "") or
1954
+ r.get("pattern", "") or
1955
+ r.get("description", "") or
1956
+ r.get("name", "")
1957
+ )[:300],
1958
+ "score": round(r.get("_score", 0), 3),
1959
+ }
1960
+ if r.get("outcome"):
1961
+ entry["outcome"] = r["outcome"]
1962
+ if r.get("category"):
1963
+ entry["category"] = r["category"]
1964
+ if r.get("timestamp"):
1965
+ entry["timestamp"] = r["timestamp"]
1966
+ compact.append(entry)
1967
+ return {"results": compact, "count": len(compact), "query": q, "collection": collection}
1968
+ except Exception as e:
1969
+ raise HTTPException(status_code=500, detail=f"Search failed: {e}")
1970
+
1971
+
1972
+ @app.get("/api/memory/stats")
1973
+ async def get_memory_stats():
1974
+ """Get memory system statistics (counts, size, backend info)."""
1975
+ storage = _get_memory_storage()
1976
+ if storage is not None:
1977
+ try:
1978
+ return storage.get_stats()
1979
+ except Exception:
1980
+ pass
1981
+
1982
+ # Fallback: compute stats from JSON files
1983
+ memory_dir = _get_loki_dir() / "memory"
1984
+ ep_count = 0
1985
+ ep_dir = memory_dir / "episodic"
1986
+ if ep_dir.exists():
1987
+ for d in ep_dir.iterdir():
1988
+ if d.is_dir():
1989
+ ep_count += len(list(d.glob("*.json")))
1990
+ elif d.suffix == ".json":
1991
+ ep_count += 1
1992
+
1993
+ pat_count = 0
1994
+ patterns_file = memory_dir / "semantic" / "patterns.json"
1995
+ if patterns_file.exists():
1996
+ try:
1997
+ data = json.loads(patterns_file.read_text())
1998
+ pat_count = len(data) if isinstance(data, list) else len(data.get("patterns", []))
1999
+ except Exception:
2000
+ pass
2001
+
2002
+ skill_count = 0
2003
+ skills_dir = memory_dir / "skills"
2004
+ if skills_dir.exists():
2005
+ skill_count = len(list(skills_dir.glob("*.json")))
2006
+
2007
+ return {
2008
+ "backend": "json",
2009
+ "episode_count": ep_count,
2010
+ "pattern_count": pat_count,
2011
+ "skill_count": skill_count,
2012
+ }
2013
+
2014
+
1827
2015
  # Learning/metrics endpoints
1828
2016
 
1829
2017
 
@@ -2,7 +2,7 @@
2
2
 
3
3
  The flagship product of [Autonomi](https://www.autonomi.dev/). Complete installation instructions for all platforms and use cases.
4
4
 
5
- **Version:** v6.14.0
5
+ **Version:** v6.15.1
6
6
 
7
7
  ---
8
8
 
package/mcp/__init__.py CHANGED
@@ -57,4 +57,4 @@ try:
57
57
  except ImportError:
58
58
  __all__ = ['mcp']
59
59
 
60
- __version__ = '6.14.0'
60
+ __version__ = '6.15.1'
package/mcp/server.py CHANGED
@@ -1376,6 +1376,265 @@ async def loki_code_search_stats() -> str:
1376
1376
  return json.dumps({"error": str(e)})
1377
1377
 
1378
1378
 
1379
+ # ============================================================
1380
+ # MEMORY SEARCH TOOLS (v6.15.0) - SQLite FTS5 powered
1381
+ # ============================================================
1382
+
1383
+ @mcp.tool()
1384
+ async def mem_search(
1385
+ query: str,
1386
+ collection: str = "all",
1387
+ limit: int = 10,
1388
+ ) -> str:
1389
+ """
1390
+ Search memory using full-text search (FTS5).
1391
+
1392
+ Fast keyword search across all memory types. Supports AND, OR, NOT
1393
+ operators and prefix matching (e.g. "debug*").
1394
+
1395
+ Args:
1396
+ query: Search query (plain text or FTS5 syntax)
1397
+ collection: Which memories to search (episodes, patterns, skills, all)
1398
+ limit: Maximum results to return
1399
+
1400
+ Returns:
1401
+ JSON array of matching memories with relevance scores
1402
+ """
1403
+ _emit_tool_event_async(
1404
+ 'mem_search', 'start',
1405
+ parameters={'query': query, 'collection': collection, 'limit': limit}
1406
+ )
1407
+ try:
1408
+ base_path = safe_path_join('.loki', 'memory')
1409
+ if not os.path.exists(base_path):
1410
+ result = json.dumps({"results": [], "message": "Memory system not initialized"})
1411
+ _emit_tool_event_async('mem_search', 'complete', result_status='success')
1412
+ return result
1413
+
1414
+ # Try SQLite backend first (has FTS5), fall back to keyword search
1415
+ try:
1416
+ from memory.sqlite_storage import SQLiteMemoryStorage
1417
+ storage = SQLiteMemoryStorage(base_path)
1418
+ results = storage.search_fts(query, collection=collection, limit=limit)
1419
+ except (ImportError, Exception):
1420
+ # Fall back to retrieval-based search
1421
+ from memory.retrieval import MemoryRetrieval
1422
+ from memory.storage import MemoryStorage
1423
+ storage = MemoryStorage(base_path)
1424
+ retriever = MemoryRetrieval(storage)
1425
+ context = {"goal": query, "task_type": "exploration"}
1426
+ results = retriever.retrieve_task_aware(context, top_k=limit)
1427
+
1428
+ # Compact results for token efficiency
1429
+ compact = []
1430
+ for r in results:
1431
+ entry = {
1432
+ "id": r.get("id", ""),
1433
+ "type": r.get("_type", r.get("type", "unknown")),
1434
+ "summary": (
1435
+ r.get("goal", "") or
1436
+ r.get("pattern", "") or
1437
+ r.get("description", "") or
1438
+ r.get("name", "")
1439
+ )[:200],
1440
+ }
1441
+ if r.get("_score"):
1442
+ entry["score"] = round(r["_score"], 3)
1443
+ if r.get("outcome"):
1444
+ entry["outcome"] = r["outcome"]
1445
+ if r.get("category"):
1446
+ entry["category"] = r["category"]
1447
+ compact.append(entry)
1448
+
1449
+ result = json.dumps({"results": compact, "count": len(compact)}, default=str)
1450
+ _emit_tool_event_async('mem_search', 'complete', result_status='success')
1451
+ return result
1452
+ except PathTraversalError as e:
1453
+ logger.error(f"Path traversal attempt blocked: {e}")
1454
+ _emit_tool_event_async('mem_search', 'complete', result_status='error', error='Access denied')
1455
+ return json.dumps({"error": "Access denied", "results": []})
1456
+ except Exception as e:
1457
+ logger.error(f"mem_search failed: {e}")
1458
+ _emit_tool_event_async('mem_search', 'complete', result_status='error', error=str(e))
1459
+ return json.dumps({"error": str(e), "results": []})
1460
+
1461
+
1462
+ @mcp.tool()
1463
+ async def mem_timeline(
1464
+ around_id: str = "",
1465
+ limit: int = 20,
1466
+ since_hours: int = 24,
1467
+ ) -> str:
1468
+ """
1469
+ Get chronological context from memory timeline.
1470
+
1471
+ Shows recent actions, key decisions, and episode traces in time order.
1472
+ Use around_id to get context surrounding a specific memory entry.
1473
+
1474
+ Args:
1475
+ around_id: Optional memory ID to center the timeline around
1476
+ limit: Maximum timeline entries to return
1477
+ since_hours: Only show entries from the last N hours (default 24)
1478
+
1479
+ Returns:
1480
+ JSON timeline with actions and decisions
1481
+ """
1482
+ _emit_tool_event_async(
1483
+ 'mem_timeline', 'start',
1484
+ parameters={'around_id': around_id, 'limit': limit, 'since_hours': since_hours}
1485
+ )
1486
+ try:
1487
+ base_path = safe_path_join('.loki', 'memory')
1488
+ if not os.path.exists(base_path):
1489
+ result = json.dumps({"timeline": [], "message": "Memory system not initialized"})
1490
+ _emit_tool_event_async('mem_timeline', 'complete', result_status='success')
1491
+ return result
1492
+
1493
+ from datetime import timedelta
1494
+ cutoff = datetime.now(timezone.utc) - timedelta(hours=since_hours)
1495
+
1496
+ try:
1497
+ from memory.sqlite_storage import SQLiteMemoryStorage
1498
+ storage = SQLiteMemoryStorage(base_path)
1499
+
1500
+ # Get timeline actions
1501
+ timeline = storage.get_timeline()
1502
+ actions = timeline.get("recent_actions", [])[:limit]
1503
+
1504
+ # Get recent episodes for richer context
1505
+ episode_ids = storage.list_episodes(since=cutoff, limit=limit)
1506
+ episodes = []
1507
+ for eid in episode_ids:
1508
+ ep = storage.load_episode(eid)
1509
+ if ep:
1510
+ episodes.append({
1511
+ "id": ep.get("id"),
1512
+ "timestamp": ep.get("timestamp"),
1513
+ "phase": ep.get("phase"),
1514
+ "goal": (ep.get("goal", "") or "")[:150],
1515
+ "outcome": ep.get("outcome"),
1516
+ "duration_seconds": ep.get("duration_seconds"),
1517
+ "files_modified": ep.get("files_modified", [])[:5],
1518
+ })
1519
+
1520
+ except (ImportError, Exception):
1521
+ from memory.storage import MemoryStorage
1522
+ storage = MemoryStorage(base_path)
1523
+ timeline = storage.get_timeline()
1524
+ actions = timeline.get("recent_actions", [])[:limit]
1525
+
1526
+ episode_ids = storage.list_episodes(since=cutoff, limit=limit)
1527
+ episodes = []
1528
+ for eid in episode_ids:
1529
+ ep = storage.load_episode(eid)
1530
+ if ep:
1531
+ episodes.append({
1532
+ "id": ep.get("id"),
1533
+ "timestamp": ep.get("timestamp"),
1534
+ "phase": ep.get("phase"),
1535
+ "goal": (ep.get("goal", "") or "")[:150],
1536
+ "outcome": ep.get("outcome"),
1537
+ })
1538
+
1539
+ result = json.dumps({
1540
+ "actions": actions,
1541
+ "episodes": episodes,
1542
+ "decisions": timeline.get("key_decisions", [])[:10],
1543
+ "active_context": timeline.get("active_context", {}),
1544
+ }, default=str)
1545
+ _emit_tool_event_async('mem_timeline', 'complete', result_status='success')
1546
+ return result
1547
+ except PathTraversalError as e:
1548
+ logger.error(f"Path traversal attempt blocked: {e}")
1549
+ _emit_tool_event_async('mem_timeline', 'complete', result_status='error', error='Access denied')
1550
+ return json.dumps({"error": "Access denied", "timeline": []})
1551
+ except Exception as e:
1552
+ logger.error(f"mem_timeline failed: {e}")
1553
+ _emit_tool_event_async('mem_timeline', 'complete', result_status='error', error=str(e))
1554
+ return json.dumps({"error": str(e), "timeline": []})
1555
+
1556
+
1557
@mcp.tool()
async def mem_get(
    ids: str,
) -> str:
    """
    Fetch full details for one or more memory entries by ID.

    Use after mem_search to get complete data for specific results.

    Args:
        ids: Comma-separated list of memory IDs to fetch

    Returns:
        JSON object with full memory details keyed by ID
    """
    _emit_tool_event_async(
        'mem_get', 'start',
        parameters={'ids': ids}
    )
    try:
        base_path = safe_path_join('.loki', 'memory')
        if not os.path.exists(base_path):
            result = json.dumps({"entries": {}, "message": "Memory system not initialized"})
            _emit_tool_event_async('mem_get', 'complete', result_status='success')
            return result

        # Parse and normalize the comma-separated ID list.
        id_list = [i.strip() for i in ids.split(",") if i.strip()]
        if not id_list:
            return json.dumps({"entries": {}, "error": "No IDs provided"})

        # Cap at 20 to prevent abuse
        id_list = id_list[:20]

        # Prefer the SQLite backend; fall back to JSON-file storage when the
        # import or backend initialization fails for any reason.
        # (Fix: was `except (ImportError, Exception)` — Exception already
        # subsumes ImportError, so the tuple was redundant and misleading.)
        try:
            from memory.sqlite_storage import SQLiteMemoryStorage
            storage = SQLiteMemoryStorage(base_path)
        except Exception:
            from memory.storage import MemoryStorage
            storage = MemoryStorage(base_path)

        entries = {}
        for mem_id in id_list:
            # Probe each collection in turn and tag the hit with its type.
            data = storage.load_episode(mem_id)
            if data:
                data["_type"] = "episode"
                entries[mem_id] = data
                continue

            data = storage.load_pattern(mem_id)
            if data:
                data["_type"] = "pattern"
                entries[mem_id] = data
                continue

            data = storage.load_skill(mem_id)
            if data:
                data["_type"] = "skill"
                entries[mem_id] = data
                continue

            entries[mem_id] = None  # Not found in any collection

        result = json.dumps({
            "entries": entries,
            "found": sum(1 for v in entries.values() if v is not None),
            "total_requested": len(id_list),
        }, default=str)
        _emit_tool_event_async('mem_get', 'complete', result_status='success')
        return result
    except PathTraversalError as e:
        # Deliberately return a generic message; do not echo the raw path.
        logger.error(f"Path traversal attempt blocked: {e}")
        _emit_tool_event_async('mem_get', 'complete', result_status='error', error='Access denied')
        return json.dumps({"error": "Access denied", "entries": {}})
    except Exception as e:
        logger.error(f"mem_get failed: {e}")
        _emit_tool_event_async('mem_get', 'complete', result_status='error', error=str(e))
        return json.dumps({"error": str(e), "entries": {}})
1636
+
1637
+
1379
1638
  # ============================================================
1380
1639
  # PROMPTS - Pre-built prompt templates
1381
1640
  # ============================================================
@@ -42,12 +42,18 @@ from .schemas import (
42
42
 
43
43
  from .storage import MemoryStorage, DEFAULT_NAMESPACE
44
44
 
45
+ try:
46
+ from .sqlite_storage import SQLiteMemoryStorage
47
+ except ImportError:
48
+ SQLiteMemoryStorage = None
49
+
45
50
  from .engine import (
46
51
  MemoryEngine,
47
52
  EpisodicMemory,
48
53
  SemanticMemory,
49
54
  ProceduralMemory,
50
55
  TASK_STRATEGIES,
56
+ create_storage,
51
57
  )
52
58
 
53
59
  from .retrieval import (
@@ -110,6 +116,8 @@ __all__ = [
110
116
  "ProceduralSkill",
111
117
  # Engine
112
118
  "MemoryStorage",
119
+ "SQLiteMemoryStorage",
120
+ "create_storage",
113
121
  "MemoryEngine",
114
122
  "EpisodicMemory",
115
123
  "SemanticMemory",
package/memory/engine.py CHANGED
@@ -1295,3 +1295,24 @@ class ProceduralMemory:
1295
1295
  def search(self, query: str, top_k: int = 5) -> List[Dict[str, Any]]:
1296
1296
  """Search skills by similarity."""
1297
1297
  return self._engine.retrieve_by_similarity(query, "procedural", top_k)
1298
+
1299
+
1300
def create_storage(base_path: str = ".loki/memory", namespace: Optional[str] = None):
    """
    Build the best memory storage backend currently available.

    The SQLite+FTS5 backend is attempted first (single file, faster
    search); if its import or initialization fails for any reason, the
    JSON-file based MemoryStorage is used instead.

    Args:
        base_path: Base path for memory data
        namespace: Optional namespace for project isolation

    Returns:
        SQLiteMemoryStorage or MemoryStorage instance
    """
    try:
        from .sqlite_storage import SQLiteMemoryStorage
        backend = SQLiteMemoryStorage(base_path=base_path, namespace=namespace)
    except Exception:
        backend = MemoryStorage(base_path=base_path, namespace=namespace)
    return backend
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "loki-mode",
3
- "version": "6.14.0",
3
+ "version": "6.15.1",
4
4
  "description": "Loki Mode by Autonomi - Multi-agent autonomous startup system for Claude Code, Codex CLI, and Gemini CLI",
5
5
  "keywords": [
6
6
  "agent",